Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * Copyright (C) 2015 Microchip Technology
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5) #include <linux/version.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7) #include <linux/netdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8) #include <linux/etherdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) #include <linux/ethtool.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) #include <linux/usb.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #include <linux/crc32.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <linux/signal.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/if_vlan.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/linkmode.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <linux/list.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include <linux/ip.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include <linux/ipv6.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include <linux/mdio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include <linux/phy.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include <net/ip6_checksum.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include <net/vxlan.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #include <linux/irqdomain.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #include <linux/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) #include <linux/irqchip/chained_irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) #include <linux/microchipphy.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) #include <linux/phy_fixed.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) #include <linux/of_mdio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) #include <linux/of_net.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) #include "lan78xx.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) #define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) #define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) #define DRIVER_NAME	"lan78xx"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) #define TX_TIMEOUT_JIFFIES		(5 * HZ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) #define THROTTLE_JIFFIES		(HZ / 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) #define UNLINK_TIMEOUT_MS		3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42) #define RX_MAX_QUEUE_MEMORY		(60 * 1518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) #define SS_USB_PKT_SIZE			(1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45) #define HS_USB_PKT_SIZE			(512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46) #define FS_USB_PKT_SIZE			(64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) #define MAX_RX_FIFO_SIZE		(12 * 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49) #define MAX_TX_FIFO_SIZE		(12 * 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) #define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) #define DEFAULT_BULK_IN_DELAY		(0x0800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) #define MAX_SINGLE_PACKET_SIZE		(9000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) #define DEFAULT_TX_CSUM_ENABLE		(true)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) #define DEFAULT_RX_CSUM_ENABLE		(true)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) #define DEFAULT_TSO_CSUM_ENABLE		(true)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) #define DEFAULT_VLAN_FILTER_ENABLE	(true)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) #define DEFAULT_VLAN_RX_OFFLOAD		(true)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) #define TX_OVERHEAD			(8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) #define RXW_PADDING			2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) #define LAN78XX_USB_VENDOR_ID		(0x0424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) #define LAN7800_USB_PRODUCT_ID		(0x7800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) #define LAN7850_USB_PRODUCT_ID		(0x7850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) #define LAN7801_USB_PRODUCT_ID		(0x7801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) #define LAN78XX_EEPROM_MAGIC		(0x78A5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) #define LAN78XX_OTP_MAGIC		(0x78F3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) #define AT29M2AF_USB_VENDOR_ID		(0x07C9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) #define AT29M2AF_USB_PRODUCT_ID	(0x0012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) #define	MII_READ			1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) #define	MII_WRITE			0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) #define EEPROM_INDICATOR		(0xA5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) #define EEPROM_MAC_OFFSET		(0x01)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) #define MAX_EEPROM_SIZE			512
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) #define OTP_INDICATOR_1			(0xF3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) #define OTP_INDICATOR_2			(0xF7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79) #define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80) 					 WAKE_MCAST | WAKE_BCAST | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) 					 WAKE_ARP | WAKE_MAGIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) /* USB related defines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) #define BULK_IN_PIPE			1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) #define BULK_OUT_PIPE			2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) /* default autosuspend delay (mSec)*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) #define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) /* statistic update interval (mSec) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) #define STAT_UPDATE_TIMER		(1 * 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) /* defines interrupts from interrupt EP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) #define MAX_INT_EP			(32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) #define INT_EP_INTEP			(31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) #define INT_EP_OTP_WR_DONE		(28)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) #define INT_EP_EEE_TX_LPI_START		(26)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) #define INT_EP_EEE_TX_LPI_STOP		(25)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) #define INT_EP_EEE_RX_LPI		(24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) #define INT_EP_MAC_RESET_TIMEOUT	(23)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) #define INT_EP_RDFO			(22)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102) #define INT_EP_TXE			(21)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) #define INT_EP_USB_STATUS		(20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) #define INT_EP_TX_DIS			(19)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) #define INT_EP_RX_DIS			(18)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) #define INT_EP_PHY			(17)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) #define INT_EP_DP			(16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) #define INT_EP_MAC_ERR			(15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) #define INT_EP_TDFU			(14)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) #define INT_EP_TDFO			(13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) #define INT_EP_UTX			(12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) #define INT_EP_GPIO_11			(11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) #define INT_EP_GPIO_10			(10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) #define INT_EP_GPIO_9			(9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) #define INT_EP_GPIO_8			(8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) #define INT_EP_GPIO_7			(7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) #define INT_EP_GPIO_6			(6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118) #define INT_EP_GPIO_5			(5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) #define INT_EP_GPIO_4			(4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120) #define INT_EP_GPIO_3			(3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121) #define INT_EP_GPIO_2			(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122) #define INT_EP_GPIO_1			(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123) #define INT_EP_GPIO_0			(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125) static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126) 	"RX FCS Errors",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127) 	"RX Alignment Errors",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128) 	"Rx Fragment Errors",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129) 	"RX Jabber Errors",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130) 	"RX Undersize Frame Errors",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  131) 	"RX Oversize Frame Errors",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  132) 	"RX Dropped Frames",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  133) 	"RX Unicast Byte Count",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134) 	"RX Broadcast Byte Count",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135) 	"RX Multicast Byte Count",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136) 	"RX Unicast Frames",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137) 	"RX Broadcast Frames",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138) 	"RX Multicast Frames",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139) 	"RX Pause Frames",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) 	"RX 64 Byte Frames",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) 	"RX 65 - 127 Byte Frames",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) 	"RX 128 - 255 Byte Frames",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) 	"RX 256 - 511 Bytes Frames",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) 	"RX 512 - 1023 Byte Frames",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145) 	"RX 1024 - 1518 Byte Frames",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146) 	"RX Greater 1518 Byte Frames",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) 	"EEE RX LPI Transitions",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) 	"EEE RX LPI Time",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) 	"TX FCS Errors",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150) 	"TX Excess Deferral Errors",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) 	"TX Carrier Errors",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152) 	"TX Bad Byte Count",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) 	"TX Single Collisions",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154) 	"TX Multiple Collisions",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155) 	"TX Excessive Collision",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156) 	"TX Late Collisions",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157) 	"TX Unicast Byte Count",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158) 	"TX Broadcast Byte Count",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159) 	"TX Multicast Byte Count",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  160) 	"TX Unicast Frames",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  161) 	"TX Broadcast Frames",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) 	"TX Multicast Frames",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) 	"TX Pause Frames",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164) 	"TX 64 Byte Frames",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165) 	"TX 65 - 127 Byte Frames",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) 	"TX 128 - 255 Byte Frames",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) 	"TX 256 - 511 Bytes Frames",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168) 	"TX 512 - 1023 Byte Frames",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169) 	"TX 1024 - 1518 Byte Frames",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170) 	"TX Greater 1518 Byte Frames",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) 	"EEE TX LPI Transitions",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) 	"EEE TX LPI Time",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175) struct lan78xx_statstage {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) 	u32 rx_fcs_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) 	u32 rx_alignment_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178) 	u32 rx_fragment_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179) 	u32 rx_jabber_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180) 	u32 rx_undersize_frame_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181) 	u32 rx_oversize_frame_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182) 	u32 rx_dropped_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183) 	u32 rx_unicast_byte_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) 	u32 rx_broadcast_byte_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185) 	u32 rx_multicast_byte_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186) 	u32 rx_unicast_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187) 	u32 rx_broadcast_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) 	u32 rx_multicast_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189) 	u32 rx_pause_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190) 	u32 rx_64_byte_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) 	u32 rx_65_127_byte_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) 	u32 rx_128_255_byte_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) 	u32 rx_256_511_bytes_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) 	u32 rx_512_1023_byte_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195) 	u32 rx_1024_1518_byte_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196) 	u32 rx_greater_1518_byte_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197) 	u32 eee_rx_lpi_transitions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) 	u32 eee_rx_lpi_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) 	u32 tx_fcs_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) 	u32 tx_excess_deferral_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) 	u32 tx_carrier_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) 	u32 tx_bad_byte_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) 	u32 tx_single_collisions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) 	u32 tx_multiple_collisions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) 	u32 tx_excessive_collision;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) 	u32 tx_late_collisions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) 	u32 tx_unicast_byte_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) 	u32 tx_broadcast_byte_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) 	u32 tx_multicast_byte_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) 	u32 tx_unicast_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) 	u32 tx_broadcast_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) 	u32 tx_multicast_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) 	u32 tx_pause_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) 	u32 tx_64_byte_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) 	u32 tx_65_127_byte_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216) 	u32 tx_128_255_byte_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217) 	u32 tx_256_511_bytes_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218) 	u32 tx_512_1023_byte_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219) 	u32 tx_1024_1518_byte_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220) 	u32 tx_greater_1518_byte_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) 	u32 eee_tx_lpi_transitions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) 	u32 eee_tx_lpi_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) struct lan78xx_statstage64 {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) 	u64 rx_fcs_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227) 	u64 rx_alignment_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) 	u64 rx_fragment_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) 	u64 rx_jabber_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) 	u64 rx_undersize_frame_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) 	u64 rx_oversize_frame_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) 	u64 rx_dropped_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) 	u64 rx_unicast_byte_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) 	u64 rx_broadcast_byte_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) 	u64 rx_multicast_byte_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) 	u64 rx_unicast_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) 	u64 rx_broadcast_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) 	u64 rx_multicast_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) 	u64 rx_pause_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) 	u64 rx_64_byte_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) 	u64 rx_65_127_byte_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) 	u64 rx_128_255_byte_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) 	u64 rx_256_511_bytes_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) 	u64 rx_512_1023_byte_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) 	u64 rx_1024_1518_byte_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) 	u64 rx_greater_1518_byte_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) 	u64 eee_rx_lpi_transitions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) 	u64 eee_rx_lpi_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) 	u64 tx_fcs_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) 	u64 tx_excess_deferral_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251) 	u64 tx_carrier_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) 	u64 tx_bad_byte_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) 	u64 tx_single_collisions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254) 	u64 tx_multiple_collisions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) 	u64 tx_excessive_collision;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256) 	u64 tx_late_collisions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257) 	u64 tx_unicast_byte_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) 	u64 tx_broadcast_byte_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) 	u64 tx_multicast_byte_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) 	u64 tx_unicast_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) 	u64 tx_broadcast_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262) 	u64 tx_multicast_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  263) 	u64 tx_pause_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  264) 	u64 tx_64_byte_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  265) 	u64 tx_65_127_byte_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  266) 	u64 tx_128_255_byte_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  267) 	u64 tx_256_511_bytes_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  268) 	u64 tx_512_1023_byte_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269) 	u64 tx_1024_1518_byte_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270) 	u64 tx_greater_1518_byte_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  271) 	u64 eee_tx_lpi_transitions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272) 	u64 eee_tx_lpi_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  273) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275) static u32 lan78xx_regs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276) 	ID_REV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  277) 	INT_STS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  278) 	HW_CFG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  279) 	PMT_CTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280) 	E2P_CMD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  281) 	E2P_DATA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282) 	USB_STATUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283) 	VLAN_TYPE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284) 	MAC_CR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285) 	MAC_RX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286) 	MAC_TX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287) 	FLOW,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288) 	ERR_STS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289) 	MII_ACC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290) 	MII_DATA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291) 	EEE_TX_LPI_REQ_DLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292) 	EEE_TW_TX_SYS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  293) 	EEE_TX_LPI_REM_DLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  294) 	WUCSR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  295) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  297) #define PHY_REG_SIZE (32 * sizeof(u32))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299) struct lan78xx_net;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  301) struct lan78xx_priv {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  302) 	struct lan78xx_net *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  303) 	u32 rfe_ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  304) 	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicat hash table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  305) 	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  306) 	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  307) 	struct mutex dataport_mutex; /* for dataport access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  308) 	spinlock_t rfe_ctl_lock; /* for rfe register access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  309) 	struct work_struct set_multicast;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  310) 	struct work_struct set_vlan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  311) 	u32 wol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  312) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  314) enum skb_state {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  315) 	illegal = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  316) 	tx_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  317) 	tx_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318) 	rx_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319) 	rx_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  320) 	rx_cleanup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  321) 	unlink_start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  322) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  324) struct skb_data {		/* skb->cb is one of these */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  325) 	struct urb *urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  326) 	struct lan78xx_net *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  327) 	enum skb_state state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  328) 	size_t length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  329) 	int num_of_packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  330) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  332) struct usb_context {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333) 	struct usb_ctrlrequest req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  334) 	struct lan78xx_net *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  335) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337) #define EVENT_TX_HALT			0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) #define EVENT_RX_HALT			1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339) #define EVENT_RX_MEMORY			2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340) #define EVENT_STS_SPLIT			3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341) #define EVENT_LINK_RESET		4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342) #define EVENT_RX_PAUSED			5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  343) #define EVENT_DEV_WAKING		6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344) #define EVENT_DEV_ASLEEP		7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  345) #define EVENT_DEV_OPEN			8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) #define EVENT_STAT_UPDATE		9
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348) struct statstage {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349) 	struct mutex			access_lock;	/* for stats access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350) 	struct lan78xx_statstage	saved;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) 	struct lan78xx_statstage	rollover_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352) 	struct lan78xx_statstage	rollover_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  353) 	struct lan78xx_statstage64	curr_stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  356) struct irq_domain_data {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357) 	struct irq_domain	*irqdomain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  358) 	unsigned int		phyirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  359) 	struct irq_chip		*irqchip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  360) 	irq_flow_handler_t	irq_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  361) 	u32			irqenable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  362) 	struct mutex		irq_lock;		/* for irq bus access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  363) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  365) struct lan78xx_net {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  366) 	struct net_device	*net;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367) 	struct usb_device	*udev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368) 	struct usb_interface	*intf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369) 	void			*driver_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371) 	int			rx_qlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372) 	int			tx_qlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373) 	struct sk_buff_head	rxq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) 	struct sk_buff_head	txq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) 	struct sk_buff_head	done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376) 	struct sk_buff_head	rxq_pause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) 	struct sk_buff_head	txq_pend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) 	struct tasklet_struct	bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) 	struct delayed_work	wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) 	int			msg_enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384) 	struct urb		*urb_intr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) 	struct usb_anchor	deferred;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) 	struct mutex		phy_mutex; /* for phy access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388) 	unsigned		pipe_in, pipe_out, pipe_intr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) 	u32			hard_mtu;	/* count any extra framing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) 	size_t			rx_urb_size;	/* size for rx urbs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) 	unsigned long		flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) 	wait_queue_head_t	*wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396) 	unsigned char		suspend_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) 	unsigned		maxpacket;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) 	struct timer_list	delay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) 	struct timer_list	stat_monitor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) 	unsigned long		data[5];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 	int			link_on;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) 	u8			mdix_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) 	u32			chipid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 	u32			chiprev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) 	struct mii_bus		*mdiobus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) 	phy_interface_t		interface;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) 	int			fc_autoneg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 	u8			fc_request_control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 	int			delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 	struct statstage	stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) 	struct irq_domain_data	domain_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) /* define external phy id */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) #define	PHY_LAN8835			(0x0007C130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) #define	PHY_KSZ9031RNX			(0x00221620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) /* use ethtool to change the level for any given device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) static int msg_level = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) module_param(msg_level, int, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) MODULE_PARM_DESC(msg_level, "Override default message level");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 	if (!buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 			      USB_VENDOR_REQUEST_READ_REGISTER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 	if (likely(ret >= 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 		le32_to_cpus(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 		*data = *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 		netdev_warn(dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 			    "Failed to read register index 0x%08x. ret = %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 			    index, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 	kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 	if (!buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 	*buf = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 	cpu_to_le32s(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 			      USB_VENDOR_REQUEST_WRITE_REGISTER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 	if (unlikely(ret < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 		netdev_warn(dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 			    "Failed to write register index 0x%08x. ret = %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 			    index, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 	kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) static int lan78xx_read_stats(struct lan78xx_net *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 			      struct lan78xx_statstage *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 	struct lan78xx_statstage *stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 	u32 *src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 	u32 *dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 	if (!stats)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 	ret = usb_control_msg(dev->udev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 			      usb_rcvctrlpipe(dev->udev, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 			      USB_VENDOR_REQUEST_GET_STATS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 			      0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 			      0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 			      (void *)stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 			      sizeof(*stats),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 			      USB_CTRL_SET_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 	if (likely(ret >= 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 		src = (u32 *)stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 		dst = (u32 *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 		for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 			le32_to_cpus(&src[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 			dst[i] = src[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 		netdev_warn(dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 			    "Failed to read stat ret = %d", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 	kfree(stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 
/* Detect a 32-bit hardware counter wrap: if the freshly read value is
 * smaller than the value saved from the previous read, the counter
 * rolled over, so bump the matching per-counter rollover count.
 *
 * Fix: wrapped in do { } while (0) so the macro expands to a single
 * statement and is safe inside an unbraced if/else; arguments are
 * parenthesized against operator-precedence surprises.
 */
#define check_counter_rollover(struct1, dev_stats, member)	\
do {								\
	if ((struct1)->member < (dev_stats).saved.member)	\
		(dev_stats).rollover_count.member++;		\
} while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 
/* Compare a freshly read statistics block against the values saved
 * from the previous read, bumping dev->stats.rollover_count for every
 * counter that wrapped, then save the new reading for the next call.
 *
 * NOTE(review): assumes hardware counters only increase between
 * reads; a counter reset by the device would be miscounted as a
 * rollover — confirm against the LAN78xx statistics semantics.
 */
static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	/* remember this reading as the baseline for the next comparison */
	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) static void lan78xx_update_stats(struct lan78xx_net *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 	u32 *p, *count, *max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 	u64 *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 	struct lan78xx_statstage lan78xx_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 	if (usb_autopm_get_interface(dev->intf) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 	p = (u32 *)&lan78xx_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 	count = (u32 *)&dev->stats.rollover_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 	max = (u32 *)&dev->stats.rollover_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 	data = (u64 *)&dev->stats.curr_stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 	mutex_lock(&dev->stats.access_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 		lan78xx_check_stat_rollover(dev, &lan78xx_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 	mutex_unlock(&dev->stats.access_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 	usb_autopm_put_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) /* Loop until the read is completed with timeout called with phy_mutex held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 	unsigned long start_time = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 		ret = lan78xx_read_reg(dev, MII_ACC, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 		if (unlikely(ret < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 			return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 		if (!(val & MII_ACC_MII_BUSY_))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 	} while (!time_after(jiffies, start_time + HZ));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 	return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) static inline u32 mii_access(int id, int index, int read)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 	u32 ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 	if (read)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 		ret |= MII_ACC_MII_READ_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 		ret |= MII_ACC_MII_WRITE_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 	ret |= MII_ACC_MII_BUSY_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 	unsigned long start_time = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 		if (unlikely(ret < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 			return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 		if (!(val & E2P_CMD_EPC_BUSY_) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 		    (val & E2P_CMD_EPC_TIMEOUT_))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 		usleep_range(40, 100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 	} while (!time_after(jiffies, start_time + HZ));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 		netdev_warn(dev->net, "EEPROM read operation timeout");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 	unsigned long start_time = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 		if (unlikely(ret < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 			return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 		if (!(val & E2P_CMD_EPC_BUSY_))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 		usleep_range(40, 100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 	} while (!time_after(jiffies, start_time + HZ));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 	netdev_warn(dev->net, "EEPROM is busy");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 	return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 				   u32 length, u8 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 	u32 saved;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 	int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 	int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 	/* depends on chip, some EEPROM pins are muxed with LED function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 	 * disable & restore LED function to access EEPROM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	ret = lan78xx_read_reg(dev, HW_CFG, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 	saved = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 		ret = lan78xx_write_reg(dev, HW_CFG, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 	retval = lan78xx_eeprom_confirm_not_busy(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 	if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 		return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 	for (i = 0; i < length; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 		ret = lan78xx_write_reg(dev, E2P_CMD, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 		if (unlikely(ret < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 			retval = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 			goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 		retval = lan78xx_wait_eeprom(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 		if (retval < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 			goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 		if (unlikely(ret < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 			retval = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 			goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 		data[i] = val & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 		offset++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	retval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	if (dev->chipid == ID_REV_CHIP_ID_7800_)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 		ret = lan78xx_write_reg(dev, HW_CFG, saved);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 			       u32 length, u8 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	u8 sig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	if ((ret == 0) && (sig == EEPROM_INDICATOR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 		ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 
/* Write @length bytes from @data to the EEPROM starting at @offset.
 *
 * Sequence: optionally disable the LED pins (muxed with EEPROM pins on
 * LAN7800), confirm the controller is idle, issue a write/erase-enable
 * command, then program one byte per loop iteration (fill E2P_DATA,
 * issue WRITE, wait for completion).  All failure paths go through
 * "exit" so the HW_CFG value saved at entry is restored.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* depends on chip, some EEPROM pins are muxed with LED function.
	 * disable & restore LED function to access EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		goto exit;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (unlikely(ret < 0)) {
		retval = -EIO;
		goto exit;
	}

	retval = lan78xx_wait_eeprom(dev);
	if (retval < 0)
		goto exit;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		/* wait for this byte to be programmed before the next one */
		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		offset++;
	}

	retval = 0;
exit:
	/* restore the LED configuration captured at entry */
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 
/* Read @length bytes from the OTP memory starting at @offset into
 * @data.
 *
 * Powers the OTP block up if it is in power-down (waits up to ~1 s
 * for OTP_PWR_DN to clear), then for each byte programs the split
 * address registers, issues a READ command, polls OTP_STATUS until
 * the busy bit clears, and fetches the byte from OTP_RD_DATA.
 *
 * Returns 0 on success, -EIO on a poll timeout.
 *
 * NOTE(review): the lan78xx_read_reg()/lan78xx_write_reg() results
 * assigned to "ret" are never checked, so a failed register access is
 * only caught indirectly via the timeout loops — confirm whether this
 * is intentional.
 */
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		/* the OTP address is split: high bits in ADDR1, low in ADDR2 */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));

		/* select READ and start the command */
		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		/* only the low byte of OTP_RD_DATA carries the OTP byte */
		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 				 u32 length, u8 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	u32 buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	unsigned long timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	if (buf & OTP_PWR_DN_PWRDN_N_) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 		/* clear it and wait to be cleared */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 		timeout = jiffies + HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 		do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 			udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 			if (time_after(jiffies, timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 				netdev_warn(dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 					    "timeout on OTP_PWR_DN completion");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 				return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		} while (buf & OTP_PWR_DN_PWRDN_N_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	/* set to BYTE program mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	for (i = 0; i < length; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 		ret = lan78xx_write_reg(dev, OTP_ADDR1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 					((offset + i) >> 8) & OTP_ADDR1_15_11);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 		ret = lan78xx_write_reg(dev, OTP_ADDR2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 					((offset + i) & OTP_ADDR2_10_3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		timeout = jiffies + HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 			udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 			if (time_after(jiffies, timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 				netdev_warn(dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 					    "Timeout on OTP_STATUS completion");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 				return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 		} while (buf & OTP_STATUS_BUSY_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 			    u32 length, u8 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	u8 sig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	if (ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 		if (sig == OTP_INDICATOR_2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 			offset += 0x100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 		else if (sig != OTP_INDICATOR_1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 			ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 		if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 			ret = lan78xx_read_raw_otp(dev, offset, length, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	for (i = 0; i < 100; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 		u32 dp_sel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 		if (unlikely(ret < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 			return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 		if (dp_sel & DP_SEL_DPRDY_)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 		usleep_range(40, 100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 				  u32 addr, u32 length, u32 *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	u32 dp_sel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	if (usb_autopm_get_interface(dev->intf) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	mutex_lock(&pdata->dataport_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	ret = lan78xx_dataport_wait_not_busy(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	dp_sel &= ~DP_SEL_RSEL_MASK_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	dp_sel |= ram_select;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	for (i = 0; i < length; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 		ret = lan78xx_dataport_wait_not_busy(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 			goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	mutex_unlock(&pdata->dataport_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	usb_autopm_put_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 				    int index, u8 addr[ETH_ALEN])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	u32 temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 		temp = addr[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 		temp = addr[2] | (temp << 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 		temp = addr[1] | (temp << 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 		temp = addr[0] | (temp << 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 		pdata->pfilter_table[index][1] = temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 		temp = addr[5];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 		temp = addr[4] | (temp << 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 		pdata->pfilter_table[index][0] = temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) /* returns hash bit number for given MAC address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) static inline u32 lan78xx_hash(char addr[ETH_ALEN])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) static void lan78xx_deferred_multicast_write(struct work_struct *param)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	struct lan78xx_priv *pdata =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 			container_of(param, struct lan78xx_priv, set_multicast);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	struct lan78xx_net *dev = pdata->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 		  pdata->rfe_ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	for (i = 1; i < NUM_OF_MAF; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 		ret = lan78xx_write_reg(dev, MAF_LO(i),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 					pdata->pfilter_table[i][1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 		ret = lan78xx_write_reg(dev, MAF_HI(i),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 					pdata->pfilter_table[i][0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) static void lan78xx_set_multicast(struct net_device *netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	struct lan78xx_net *dev = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 			pdata->mchash_table[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	/* pfilter_table[0] has own HW address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	for (i = 1; i < NUM_OF_MAF; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 			pdata->pfilter_table[i][0] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 			pdata->pfilter_table[i][1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	if (dev->net->flags & IFF_PROMISC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 		if (dev->net->flags & IFF_ALLMULTI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 			netif_dbg(dev, drv, dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 				  "receive all multicast enabled");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	if (netdev_mc_count(dev->net)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 		struct netdev_hw_addr *ha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 		int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 		i = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 		netdev_for_each_mc_addr(ha, netdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 			/* set first 32 into Perfect Filter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 			if (i < 33) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 				lan78xx_set_addr_filter(pdata, i, ha->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 				u32 bitnum = lan78xx_hash(ha->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 				pdata->mchash_table[bitnum / 32] |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 							(1 << (bitnum % 32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 			i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	/* defer register writes to a sleepable context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	schedule_work(&pdata->set_multicast);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 				      u16 lcladv, u16 rmtadv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	u32 flow = 0, fct_flow = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	u8 cap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	if (dev->fc_autoneg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 		cap = dev->fc_request_control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	if (cap & FLOW_CTRL_TX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	if (cap & FLOW_CTRL_RX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 		flow |= FLOW_CR_RX_FCEN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	if (dev->udev->speed == USB_SPEED_SUPER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 		fct_flow = 0x817;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	else if (dev->udev->speed == USB_SPEED_HIGH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 		fct_flow = 0x211;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	/* threshold value should be set before enabling flow */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	ret = lan78xx_write_reg(dev, FLOW, flow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) static int lan78xx_link_reset(struct lan78xx_net *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	struct phy_device *phydev = dev->net->phydev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	struct ethtool_link_ksettings ecmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	int ladv, radv, ret, link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	u32 buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	/* clear LAN78xx interrupt status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	if (unlikely(ret < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	mutex_lock(&phydev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	phy_read_status(phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	link = phydev->link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	mutex_unlock(&phydev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	if (!link && dev->link_on) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 		dev->link_on = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 		/* reset MAC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 		if (unlikely(ret < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 			return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 		buf |= MAC_CR_RST_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 		ret = lan78xx_write_reg(dev, MAC_CR, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 		if (unlikely(ret < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 			return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 		del_timer(&dev->stat_monitor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	} else if (link && !dev->link_on) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 		dev->link_on = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 		phy_ethtool_ksettings_get(phydev, &ecmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 		if (dev->udev->speed == USB_SPEED_SUPER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 			if (ecmd.base.speed == 1000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 				/* disable U2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 				/* enable U1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 				buf |= USB_CFG1_DEV_U1_INIT_EN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 				/* enable U1 & U2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 				buf |= USB_CFG1_DEV_U2_INIT_EN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 				buf |= USB_CFG1_DEV_U1_INIT_EN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 		ladv = phy_read(phydev, MII_ADVERTISE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 		if (ladv < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 			return ladv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 		radv = phy_read(phydev, MII_LPA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 		if (radv < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 			return radv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 		netif_dbg(dev, link, dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 						 radv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 		if (!timer_pending(&dev->stat_monitor)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 			dev->delta = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 			mod_timer(&dev->stat_monitor,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 				  jiffies + STAT_UPDATE_TIMER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 		tasklet_schedule(&dev->bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) /* some work can't be done in tasklets, so we use keventd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)  * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)  * but tasklet_schedule() doesn't.	hope the failure is rare.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	set_bit(work, &dev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	if (!schedule_delayed_work(&dev->wq, 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	u32 intdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	if (urb->actual_length != 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 		netdev_warn(dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 			    "unexpected urb length %d", urb->actual_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	intdata = get_unaligned_le32(urb->transfer_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	if (intdata & INT_ENP_PHY_INT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 		if (dev->domain_data.phyirq > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 			local_irq_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 			generic_handle_irq(dev->domain_data.phyirq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 			local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 		netdev_warn(dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 			    "unexpected interrupt: 0x%08x\n", intdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	return MAX_EEPROM_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 				      struct ethtool_eeprom *ee, u8 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	struct lan78xx_net *dev = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	ret = usb_autopm_get_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	ee->magic = LAN78XX_EEPROM_MAGIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	usb_autopm_put_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 				      struct ethtool_eeprom *ee, u8 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	struct lan78xx_net *dev = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	ret = usb_autopm_get_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	/* Invalid EEPROM_INDICATOR at offset zero will result in a failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	 * to load data from EEPROM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	if (ee->magic == LAN78XX_EEPROM_MAGIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 		ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 		 (ee->offset == 0) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 		 (ee->len == 512) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 		 (data[0] == OTP_INDICATOR_1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 		ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	usb_autopm_put_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 				u8 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	if (stringset == ETH_SS_STATS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	if (sset == ETH_SS_STATS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 		return ARRAY_SIZE(lan78xx_gstrings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) static void lan78xx_get_stats(struct net_device *netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 			      struct ethtool_stats *stats, u64 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	struct lan78xx_net *dev = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	lan78xx_update_stats(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	mutex_lock(&dev->stats.access_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	mutex_unlock(&dev->stats.access_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) static void lan78xx_get_wol(struct net_device *netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 			    struct ethtool_wolinfo *wol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	struct lan78xx_net *dev = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	u32 buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	if (usb_autopm_get_interface(dev->intf) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	if (unlikely(ret < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 		wol->supported = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 		wol->wolopts = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 		if (buf & USB_CFG_RMT_WKP_) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 			wol->supported = WAKE_ALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 			wol->wolopts = pdata->wol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 			wol->supported = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 			wol->wolopts = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	usb_autopm_put_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) static int lan78xx_set_wol(struct net_device *netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 			   struct ethtool_wolinfo *wol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	struct lan78xx_net *dev = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	ret = usb_autopm_get_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	if (wol->wolopts & ~WAKE_ALL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 	pdata->wol = wol->wolopts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	phy_ethtool_set_wol(netdev->phydev, wol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	usb_autopm_put_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 	struct lan78xx_net *dev = netdev_priv(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	struct phy_device *phydev = net->phydev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	u32 buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	ret = usb_autopm_get_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 	ret = phy_ethtool_get_eee(phydev, edata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 		goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	if (buf & MAC_CR_EEE_EN_) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 		edata->eee_enabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 		edata->eee_active = !!(edata->advertised &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 				       edata->lp_advertised);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 		edata->tx_lpi_enabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 		edata->tx_lpi_timer = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 		edata->eee_enabled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 		edata->eee_active = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 		edata->tx_lpi_enabled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 		edata->tx_lpi_timer = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	usb_autopm_put_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	struct lan78xx_net *dev = netdev_priv(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	u32 buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	ret = usb_autopm_get_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	if (edata->eee_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 		buf |= MAC_CR_EEE_EN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 		ret = lan78xx_write_reg(dev, MAC_CR, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 		phy_ethtool_set_eee(net->phydev, edata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 		buf = (u32)edata->tx_lpi_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 		buf &= ~MAC_CR_EEE_EN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 		ret = lan78xx_write_reg(dev, MAC_CR, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 	usb_autopm_put_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) static u32 lan78xx_get_link(struct net_device *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 	u32 link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	mutex_lock(&net->phydev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	phy_read_status(net->phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	link = net->phydev->link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 	mutex_unlock(&net->phydev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	return link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) static void lan78xx_get_drvinfo(struct net_device *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 				struct ethtool_drvinfo *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	struct lan78xx_net *dev = netdev_priv(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) static u32 lan78xx_get_msglevel(struct net_device *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 	struct lan78xx_net *dev = netdev_priv(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 	return dev->msg_enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) static void lan78xx_set_msglevel(struct net_device *net, u32 level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 	struct lan78xx_net *dev = netdev_priv(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	dev->msg_enable = level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) static int lan78xx_get_link_ksettings(struct net_device *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 				      struct ethtool_link_ksettings *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 	struct lan78xx_net *dev = netdev_priv(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 	struct phy_device *phydev = net->phydev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	ret = usb_autopm_get_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	phy_ethtool_ksettings_get(phydev, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 	usb_autopm_put_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) static int lan78xx_set_link_ksettings(struct net_device *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 				      const struct ethtool_link_ksettings *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	struct lan78xx_net *dev = netdev_priv(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	struct phy_device *phydev = net->phydev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 	int temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	ret = usb_autopm_get_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	/* change speed & duplex */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	ret = phy_ethtool_ksettings_set(phydev, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	if (!cmd->base.autoneg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 		/* force link down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 		temp = phy_read(phydev, MII_BMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 		mdelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 		phy_write(phydev, MII_BMCR, temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 	usb_autopm_put_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) static void lan78xx_get_pause(struct net_device *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 			      struct ethtool_pauseparam *pause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	struct lan78xx_net *dev = netdev_priv(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 	struct phy_device *phydev = net->phydev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 	struct ethtool_link_ksettings ecmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 	phy_ethtool_ksettings_get(phydev, &ecmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	pause->autoneg = dev->fc_autoneg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	if (dev->fc_request_control & FLOW_CTRL_TX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 		pause->tx_pause = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	if (dev->fc_request_control & FLOW_CTRL_RX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 		pause->rx_pause = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) static int lan78xx_set_pause(struct net_device *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 			     struct ethtool_pauseparam *pause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	struct lan78xx_net *dev = netdev_priv(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	struct phy_device *phydev = net->phydev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	struct ethtool_link_ksettings ecmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	phy_ethtool_ksettings_get(phydev, &ecmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	if (pause->autoneg && !ecmd.base.autoneg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 		goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 	dev->fc_request_control = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 	if (pause->rx_pause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 		dev->fc_request_control |= FLOW_CTRL_RX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 	if (pause->tx_pause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 		dev->fc_request_control |= FLOW_CTRL_TX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	if (ecmd.base.autoneg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 		__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 		u32 mii_adv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 				   ecmd.link_modes.advertising);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 				   ecmd.link_modes.advertising);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 		mii_adv_to_linkmode_adv_t(fc, mii_adv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 		linkmode_or(ecmd.link_modes.advertising, fc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 			    ecmd.link_modes.advertising);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 		phy_ethtool_ksettings_set(phydev, &ecmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 	dev->fc_autoneg = pause->autoneg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) static int lan78xx_get_regs_len(struct net_device *netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	if (!netdev->phydev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 		return (sizeof(lan78xx_regs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 		return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 		 void *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 	u32 *data = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 	struct lan78xx_net *dev = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 	/* Read Device/MAC registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 	for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 		lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 	if (!netdev->phydev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 	/* Read PHY registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 	for (j = 0; j < 32; i++, j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 		data[i] = phy_read(netdev->phydev, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) static const struct ethtool_ops lan78xx_ethtool_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 	.get_link	= lan78xx_get_link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 	.nway_reset	= phy_ethtool_nway_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 	.get_drvinfo	= lan78xx_get_drvinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 	.get_msglevel	= lan78xx_get_msglevel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 	.set_msglevel	= lan78xx_set_msglevel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 	.get_eeprom	= lan78xx_ethtool_get_eeprom,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 	.set_eeprom	= lan78xx_ethtool_set_eeprom,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 	.get_ethtool_stats = lan78xx_get_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 	.get_sset_count = lan78xx_get_sset_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 	.get_strings	= lan78xx_get_strings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	.get_wol	= lan78xx_get_wol,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	.set_wol	= lan78xx_set_wol,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 	.get_eee	= lan78xx_get_eee,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 	.set_eee	= lan78xx_set_eee,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 	.get_pauseparam	= lan78xx_get_pause,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 	.set_pauseparam	= lan78xx_set_pause,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 	.get_link_ksettings = lan78xx_get_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	.set_link_ksettings = lan78xx_set_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 	.get_regs_len	= lan78xx_get_regs_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 	.get_regs	= lan78xx_get_regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) static void lan78xx_init_mac_address(struct lan78xx_net *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	u32 addr_lo, addr_hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 	u8 addr[6];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 	addr[0] = addr_lo & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 	addr[1] = (addr_lo >> 8) & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 	addr[2] = (addr_lo >> 16) & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 	addr[3] = (addr_lo >> 24) & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 	addr[4] = addr_hi & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 	addr[5] = (addr_hi >> 8) & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 	if (!is_valid_ether_addr(addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 			/* valid address present in Device Tree */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 			netif_dbg(dev, ifup, dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 				  "MAC address read from Device Tree");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 						 ETH_ALEN, addr) == 0) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 					      ETH_ALEN, addr) == 0)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 			   is_valid_ether_addr(addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 			/* eeprom values are valid so use them */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 			netif_dbg(dev, ifup, dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 				  "MAC address read from EEPROM");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 			/* generate random MAC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 			eth_random_addr(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 			netif_dbg(dev, ifup, dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 				  "MAC address set to random addr");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 		addr_lo = addr[0] | (addr[1] << 8) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 			  (addr[2] << 16) | (addr[3] << 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 		addr_hi = addr[4] | (addr[5] << 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 		ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 		ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 	ether_addr_copy(dev->net->dev_addr, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) /* MDIO read and write wrappers for phylib */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 	struct lan78xx_net *dev = bus->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 	u32 val, addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	ret = usb_autopm_get_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 	mutex_lock(&dev->phy_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 	/* confirm MII not busy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 	ret = lan78xx_phy_wait_not_busy(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 	/* set the address, index & direction (read from PHY) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 	addr = mii_access(phy_id, idx, MII_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 	ret = lan78xx_write_reg(dev, MII_ACC, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 	ret = lan78xx_phy_wait_not_busy(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 	ret = lan78xx_read_reg(dev, MII_DATA, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 	ret = (int)(val & 0xFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 	mutex_unlock(&dev->phy_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 	usb_autopm_put_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 				 u16 regval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 	struct lan78xx_net *dev = bus->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 	u32 val, addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 	ret = usb_autopm_get_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 	mutex_lock(&dev->phy_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 	/* confirm MII not busy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 	ret = lan78xx_phy_wait_not_busy(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 	val = (u32)regval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 	ret = lan78xx_write_reg(dev, MII_DATA, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 	/* set the address, index & direction (write to PHY) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 	addr = mii_access(phy_id, idx, MII_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 	ret = lan78xx_write_reg(dev, MII_ACC, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 	ret = lan78xx_phy_wait_not_busy(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 	mutex_unlock(&dev->phy_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	usb_autopm_put_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) static int lan78xx_mdio_init(struct lan78xx_net *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 	struct device_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 	dev->mdiobus = mdiobus_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 	if (!dev->mdiobus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 		netdev_err(dev->net, "can't allocate MDIO bus\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 	dev->mdiobus->priv = (void *)dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 	dev->mdiobus->read = lan78xx_mdiobus_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 	dev->mdiobus->write = lan78xx_mdiobus_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 	dev->mdiobus->name = "lan78xx-mdiobus";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 	dev->mdiobus->parent = &dev->udev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 		 dev->udev->bus->busnum, dev->udev->devnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 	switch (dev->chipid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 	case ID_REV_CHIP_ID_7800_:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 	case ID_REV_CHIP_ID_7850_:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 		/* set to internal PHY id */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 		dev->mdiobus->phy_mask = ~(1 << 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 	case ID_REV_CHIP_ID_7801_:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 		/* scan thru PHYAD[2..0] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 		dev->mdiobus->phy_mask = ~(0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 	node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 	ret = of_mdiobus_register(dev->mdiobus, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 	of_node_put(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 		netdev_err(dev->net, "can't register MDIO bus\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 		goto exit1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) exit1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 	mdiobus_free(dev->mdiobus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) static void lan78xx_remove_mdio(struct lan78xx_net *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 	mdiobus_unregister(dev->mdiobus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 	mdiobus_free(dev->mdiobus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) static void lan78xx_link_status_change(struct net_device *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 	struct phy_device *phydev = net->phydev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 	int ret, temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 	/* At forced 100 F/H mode, chip may fail to set mode correctly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 	 * when cable is switched between long(~50+m) and short one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 	 * As workaround, set to 10 before setting to 100
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 	 * at forced 100 F/H mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 	if (!phydev->autoneg && (phydev->speed == 100)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 		/* disable phy interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 		temp = phy_read(phydev, LAN88XX_INT_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 		temp = phy_read(phydev, MII_BMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 		temp |= BMCR_SPEED100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 		/* clear pending interrupt generated while workaround */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 		temp = phy_read(phydev, LAN88XX_INT_STS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 		/* enable phy interrupt back */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 		temp = phy_read(phydev, LAN88XX_INT_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) static int irq_map(struct irq_domain *d, unsigned int irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 		   irq_hw_number_t hwirq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 	struct irq_domain_data *data = d->host_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 	irq_set_chip_data(irq, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 	irq_set_noprobe(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) static void irq_unmap(struct irq_domain *d, unsigned int irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 	irq_set_chip_and_handler(irq, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 	irq_set_chip_data(irq, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) static const struct irq_domain_ops chip_domain_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 	.map	= irq_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 	.unmap	= irq_unmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) static void lan78xx_irq_mask(struct irq_data *irqd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) static void lan78xx_irq_unmask(struct irq_data *irqd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 	data->irqenable |= BIT(irqd_to_hwirq(irqd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) static void lan78xx_irq_bus_lock(struct irq_data *irqd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 	mutex_lock(&data->irq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 	struct lan78xx_net *dev =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 			container_of(data, struct lan78xx_net, domain_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 	u32 buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 	/* call register access here because irq_bus_lock & irq_bus_sync_unlock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 	 * are only two callbacks executed in non-atomic contex.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 	if (buf != data->irqenable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 		ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 	mutex_unlock(&data->irq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) static struct irq_chip lan78xx_irqchip = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 	.name			= "lan78xx-irqs",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 	.irq_mask		= lan78xx_irq_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 	.irq_unmask		= lan78xx_irq_unmask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 	.irq_bus_lock		= lan78xx_irq_bus_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 	struct device_node *of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 	struct irq_domain *irqdomain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 	unsigned int irqmap = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 	u32 buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 	of_node = dev->udev->dev.parent->of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 	mutex_init(&dev->domain_data.irq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 	dev->domain_data.irqenable = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 	dev->domain_data.irqchip = &lan78xx_irqchip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 	dev->domain_data.irq_handler = handle_simple_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 	irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 					  &chip_domain_ops, &dev->domain_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 	if (irqdomain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 		/* create mapping for PHY interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 		if (!irqmap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 			irq_domain_remove(irqdomain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 			irqdomain = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 			ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 	dev->domain_data.irqdomain = irqdomain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 	dev->domain_data.phyirq = irqmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 	if (dev->domain_data.phyirq > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 		irq_dispose_mapping(dev->domain_data.phyirq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 		if (dev->domain_data.irqdomain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 			irq_domain_remove(dev->domain_data.irqdomain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 	dev->domain_data.phyirq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 	dev->domain_data.irqdomain = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) static int lan8835_fixup(struct phy_device *phydev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 	int buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 	/* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 	buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 	buf &= ~0x1800;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 	buf |= 0x0800;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 	/* RGMII MAC TXC Delay Enable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 	ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 				MAC_RGMII_ID_TXC_DELAY_EN_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 	/* RGMII TX DLL Tune Adjust */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 	ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 	dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) static int ksz9031rnx_fixup(struct phy_device *phydev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 	/* Micrel9301RNX PHY configuration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 	/* RGMII Control Signal Pad Skew */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 	phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 	/* RGMII RX Data Pad Skew */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 	phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 	/* RGMII RX Clock Pad Skew */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 	phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 	dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 	u32 buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 	struct fixed_phy_status fphy_status = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 		.link = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 		.speed = SPEED_1000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 		.duplex = DUPLEX_FULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 	struct phy_device *phydev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 	phydev = phy_find_first(dev->mdiobus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 	if (!phydev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 		netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 		phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 		if (IS_ERR(phydev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 			netdev_err(dev->net, "No PHY/fixed_PHY found\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 		netdev_dbg(dev->net, "Registered FIXED PHY\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 		dev->interface = PHY_INTERFACE_MODE_RGMII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 		ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 					MAC_RGMII_ID_TXC_DELAY_EN_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 		ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 		buf |= HW_CFG_CLK125_EN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 		buf |= HW_CFG_REFCLK25_EN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 		ret = lan78xx_write_reg(dev, HW_CFG, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 		if (!phydev->drv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 			netdev_err(dev->net, "no PHY driver found\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 		dev->interface = PHY_INTERFACE_MODE_RGMII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 		/* external PHY fixup for KSZ9031RNX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 		ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 						 ksz9031rnx_fixup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 		if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 			netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 		/* external PHY fixup for LAN8835 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 		ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 						 lan8835_fixup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 		if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 			netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 		/* add more external PHY fixup here if needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 		phydev->is_internal = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 	return phydev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) static int lan78xx_phy_init(struct lan78xx_net *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 	__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 	u32 mii_adv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 	struct phy_device *phydev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 	switch (dev->chipid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 	case ID_REV_CHIP_ID_7801_:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 		phydev = lan7801_phy_init(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 		if (!phydev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 			netdev_err(dev->net, "lan7801: PHY Init Failed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 			return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 	case ID_REV_CHIP_ID_7800_:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 	case ID_REV_CHIP_ID_7850_:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 		phydev = phy_find_first(dev->mdiobus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 		if (!phydev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 			netdev_err(dev->net, "no PHY found\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 			return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 		phydev->is_internal = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 		dev->interface = PHY_INTERFACE_MODE_GMII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 		netdev_err(dev->net, "Unknown CHIP ID found\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 	/* if phyirq is not set, use polling mode in phylib */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 	if (dev->domain_data.phyirq > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 		phydev->irq = dev->domain_data.phyirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 		phydev->irq = PHY_POLL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 	/* set to AUTOMDIX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 	phydev->mdix = ETH_TP_MDI_AUTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 	ret = phy_connect_direct(dev->net, phydev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 				 lan78xx_link_status_change,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 				 dev->interface);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 		netdev_err(dev->net, "can't attach PHY to %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 			   dev->mdiobus->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 		if (dev->chipid == ID_REV_CHIP_ID_7801_) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 			if (phy_is_pseudo_fixed_link(phydev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 				fixed_phy_unregister(phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 				phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 							     0xfffffff0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 				phy_unregister_fixup_for_uid(PHY_LAN8835,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 							     0xfffffff0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 	/* MAC doesn't support 1000T Half */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 	/* support both flow controls */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 	dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 	linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 			   phydev->advertising);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 	linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 			   phydev->advertising);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 	mii_adv_to_linkmode_adv_t(fc, mii_adv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 	linkmode_or(phydev->advertising, fc, phydev->advertising);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 	if (phydev->mdio.dev.of_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 		u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 		int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 		len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 						      "microchip,led-modes",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 						      sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 		if (len >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 			/* Ensure the appropriate LEDs are enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 			lan78xx_read_reg(dev, HW_CFG, &reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 			reg &= ~(HW_CFG_LED0_EN_ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 				 HW_CFG_LED1_EN_ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 				 HW_CFG_LED2_EN_ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 				 HW_CFG_LED3_EN_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 			reg |= (len > 0) * HW_CFG_LED0_EN_ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 				(len > 1) * HW_CFG_LED1_EN_ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 				(len > 2) * HW_CFG_LED2_EN_ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 				(len > 3) * HW_CFG_LED3_EN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 			lan78xx_write_reg(dev, HW_CFG, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 	genphy_config_aneg(phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 	dev->fc_autoneg = phydev->autoneg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 	u32 buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 	bool rxenabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 	rxenabled = ((buf & MAC_RX_RXEN_) != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 	if (rxenabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 		buf &= ~MAC_RX_RXEN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 	/* add 4 to size for FCS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 	buf &= ~MAC_RX_MAX_SIZE_MASK_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 	if (rxenabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 		buf |= MAC_RX_RXEN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 	int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 	spin_lock_irqsave(&q->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 	while (!skb_queue_empty(q)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 		struct skb_data	*entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 		struct urb *urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 		int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 		skb_queue_walk(q, skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 			entry = (struct skb_data *)skb->cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 			if (entry->state != unlink_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 				goto found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) found:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 		entry->state = unlink_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 		urb = entry->urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 		/* Get reference count of the URB to avoid it to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 		 * freed during usb_unlink_urb, which may trigger
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 		 * use-after-free problem inside usb_unlink_urb since
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 		 * usb_unlink_urb is always racing with .complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 		 * handler(include defer_bh).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 		usb_get_urb(urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 		spin_unlock_irqrestore(&q->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 		/* during some PM-driven resume scenarios,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 		 * these (async) unlinks complete immediately
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 		ret = usb_unlink_urb(urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 		if (ret != -EINPROGRESS && ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 			count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 		usb_put_urb(urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 		spin_lock_irqsave(&q->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 	spin_unlock_irqrestore(&q->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 	struct lan78xx_net *dev = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 	int ll_mtu = new_mtu + netdev->hard_header_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 	int old_hard_mtu = dev->hard_mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 	int old_rx_urb_size = dev->rx_urb_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 	/* no second zero-length packet read wanted after mtu-sized packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 	if ((ll_mtu % dev->maxpacket) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 		return -EDOM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 	ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 	netdev->mtu = new_mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 	dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 	if (dev->rx_urb_size == old_hard_mtu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 		dev->rx_urb_size = dev->hard_mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 		if (dev->rx_urb_size > old_rx_urb_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 			if (netif_running(dev->net)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 				unlink_urbs(dev, &dev->rxq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 				tasklet_schedule(&dev->bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 	struct lan78xx_net *dev = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 	struct sockaddr *addr = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 	u32 addr_lo, addr_hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 	if (netif_running(netdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 	if (!is_valid_ether_addr(addr->sa_data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 		return -EADDRNOTAVAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 	ether_addr_copy(netdev->dev_addr, addr->sa_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 	addr_lo = netdev->dev_addr[0] |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 		  netdev->dev_addr[1] << 8 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 		  netdev->dev_addr[2] << 16 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 		  netdev->dev_addr[3] << 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 	addr_hi = netdev->dev_addr[4] |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 		  netdev->dev_addr[5] << 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 	ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 	ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 	/* Added to support MAC address changes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) /* Enable or disable Rx checksum offload engine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) static int lan78xx_set_features(struct net_device *netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 				netdev_features_t features)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 	struct lan78xx_net *dev = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 	if (features & NETIF_F_RXCSUM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 		pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) static void lan78xx_deferred_vlan_write(struct work_struct *param)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 	struct lan78xx_priv *pdata =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 			container_of(param, struct lan78xx_priv, set_vlan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 	struct lan78xx_net *dev = pdata->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 				   __be16 proto, u16 vid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) 	struct lan78xx_net *dev = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 	u16 vid_bit_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 	u16 vid_dword_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 	vid_dword_index = (vid >> 5) & 0x7F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 	vid_bit_index = vid & 0x1F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 	pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) 	/* defer register writes to a sleepable context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) 	schedule_work(&pdata->set_vlan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 				    __be16 proto, u16 vid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 	struct lan78xx_net *dev = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 	u16 vid_bit_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 	u16 vid_dword_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 	vid_dword_index = (vid >> 5) & 0x7F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 	vid_bit_index = vid & 0x1F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 	pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 	/* defer register writes to a sleepable context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 	schedule_work(&pdata->set_vlan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) static void lan78xx_init_ltm(struct lan78xx_net *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 	u32 buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 	u32 regs[6] = { 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 	if (buf & USB_CFG1_LTM_ENABLE_) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 		u8 temp[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 		/* Get values from EEPROM first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 			if (temp[0] == 24) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 				ret = lan78xx_read_raw_eeprom(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 							      temp[1] * 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 							      24,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 							      (u8 *)regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 				if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 					return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 			if (temp[0] == 24) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 				ret = lan78xx_read_raw_otp(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 							   temp[1] * 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 							   24,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 							   (u8 *)regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 				if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 					return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) static int lan78xx_reset(struct lan78xx_net *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 	u32 buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 	unsigned long timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 	u8 sig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 	buf |= HW_CFG_LRST_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 	ret = lan78xx_write_reg(dev, HW_CFG, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 	timeout = jiffies + HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 		mdelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 		if (time_after(jiffies, timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 			netdev_warn(dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 				    "timeout on completion of LiteReset");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 			return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 	} while (buf & HW_CFG_LRST_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 	lan78xx_init_mac_address(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 	/* save DEVID for later usage */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 	ret = lan78xx_read_reg(dev, ID_REV, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 	/* Respond to the IN token with a NAK */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 	buf |= USB_CFG_BIR_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) 	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 	/* Init LTM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 	lan78xx_init_ltm(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 	if (dev->udev->speed == USB_SPEED_SUPER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) 		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 		dev->rx_qlen = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 		dev->tx_qlen = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) 	} else if (dev->udev->speed == USB_SPEED_HIGH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) 		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) 		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 		dev->rx_qlen = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 		dev->tx_qlen = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 	buf |= HW_CFG_MEF_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 	ret = lan78xx_write_reg(dev, HW_CFG, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 	buf |= USB_CFG_BCE_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 	/* set FIFO sizes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) 	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 	ret = lan78xx_write_reg(dev, FLOW, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 	/* Don't need rfe_ctl_lock during initialisation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) 	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) 	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 	/* Enable or disable checksum offload engines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) 	lan78xx_set_features(dev->net, dev->net->features);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) 	lan78xx_set_multicast(dev->net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) 	/* reset PHY */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) 	buf |= PMT_CTL_PHY_RST_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 	timeout = jiffies + HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 		mdelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 		if (time_after(jiffies, timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 			netdev_warn(dev->net, "timeout waiting for PHY Reset");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 			return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) 	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) 	/* LAN7801 only has RGMII mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 	if (dev->chipid == ID_REV_CHIP_ID_7801_)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 		buf &= ~MAC_CR_GMII_EN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 		ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 		if (!ret && sig != EEPROM_INDICATOR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 			/* Implies there is no external eeprom. Set mac speed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) 			netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 			buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 	ret = lan78xx_write_reg(dev, MAC_CR, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 	buf |= MAC_TX_TXEN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 	ret = lan78xx_write_reg(dev, MAC_TX, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) 	buf |= FCT_TX_CTL_EN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) 	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) 	ret = lan78xx_set_rx_max_frame_length(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) 					      dev->net->mtu + VLAN_ETH_HLEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) 	buf |= MAC_RX_RXEN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) 	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) 	buf |= FCT_RX_CTL_EN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) static void lan78xx_init_stats(struct lan78xx_net *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 	u32 *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 	/* initialize for stats update
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 	 * some counters are 20bits and some are 32bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 	p = (u32 *)&dev->stats.rollover_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 	for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 		p[i] = 0xFFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 	dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 	dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 	dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 	dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 	dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 	dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 	dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 	dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 	dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 	dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 	set_bit(EVENT_STAT_UPDATE, &dev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) static int lan78xx_open(struct net_device *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) 	struct lan78xx_net *dev = netdev_priv(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) 	ret = usb_autopm_get_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) 	phy_start(net->phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) 	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 	/* for Link Check */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 	if (dev->urb_intr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 		if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 			netif_err(dev, ifup, dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 				  "intr submit %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 			goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 	lan78xx_init_stats(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) 	set_bit(EVENT_DEV_OPEN, &dev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) 	netif_start_queue(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) 	dev->link_on = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) 	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) 	usb_autopm_put_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) 	DECLARE_WAITQUEUE(wait, current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) 	int temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) 	/* ensure there are no more active urbs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) 	add_wait_queue(&unlink_wakeup, &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) 	set_current_state(TASK_UNINTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) 	dev->wait = &unlink_wakeup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) 	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 	/* maybe wait for deletions to finish. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 	while (!skb_queue_empty(&dev->rxq) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 	       !skb_queue_empty(&dev->txq) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 	       !skb_queue_empty(&dev->done)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) 		set_current_state(TASK_UNINTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) 		netif_dbg(dev, ifdown, dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 			  "waited for %d urb completions\n", temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 	set_current_state(TASK_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) 	dev->wait = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) 	remove_wait_queue(&unlink_wakeup, &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) static int lan78xx_stop(struct net_device *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) 	struct lan78xx_net *dev = netdev_priv(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 	if (timer_pending(&dev->stat_monitor))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 		del_timer_sync(&dev->stat_monitor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 	if (net->phydev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) 		phy_stop(net->phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 	clear_bit(EVENT_DEV_OPEN, &dev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 	netif_stop_queue(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) 	netif_info(dev, ifdown, dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) 		   net->stats.rx_packets, net->stats.tx_packets,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) 		   net->stats.rx_errors, net->stats.tx_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) 	lan78xx_terminate_urbs(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) 	usb_kill_urb(dev->urb_intr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 	skb_queue_purge(&dev->rxq_pause);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 	/* deferred work (task, timer, softirq) must also stop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 	 * can't flush_scheduled_work() until we drop rtnl (later),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) 	 * else workers could deadlock; so make workers a NOP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 	dev->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 	cancel_delayed_work_sync(&dev->wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) 	tasklet_kill(&dev->bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) 	usb_autopm_put_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) 				       struct sk_buff *skb, gfp_t flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) 	u32 tx_cmd_a, tx_cmd_b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) 	void *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) 	if (skb_cow_head(skb, TX_OVERHEAD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) 		dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) 	if (skb_linearize(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) 		dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) 	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) 	if (skb->ip_summed == CHECKSUM_PARTIAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) 		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) 	tx_cmd_b = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) 	if (skb_is_gso(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) 		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) 		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) 		tx_cmd_a |= TX_CMD_A_LSO_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) 	if (skb_vlan_tag_present(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) 		tx_cmd_a |= TX_CMD_A_IVTG_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) 		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) 	ptr = skb_push(skb, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) 	put_unaligned_le32(tx_cmd_a, ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) 	put_unaligned_le32(tx_cmd_b, ptr + 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) 	return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) 			       struct sk_buff_head *list, enum skb_state state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) 	enum skb_state old_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) 	struct skb_data *entry = (struct skb_data *)skb->cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) 	spin_lock_irqsave(&list->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) 	old_state = entry->state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) 	entry->state = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) 	__skb_unlink(skb, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) 	spin_unlock(&list->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) 	spin_lock(&dev->done.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) 	__skb_queue_tail(&dev->done, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) 	if (skb_queue_len(&dev->done) == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) 		tasklet_schedule(&dev->bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) 	spin_unlock_irqrestore(&dev->done.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) 	return old_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) static void tx_complete(struct urb *urb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) 	struct sk_buff *skb = (struct sk_buff *)urb->context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) 	struct skb_data *entry = (struct skb_data *)skb->cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) 	struct lan78xx_net *dev = entry->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) 	if (urb->status == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) 		dev->net->stats.tx_packets += entry->num_of_packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) 		dev->net->stats.tx_bytes += entry->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) 		dev->net->stats.tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) 		switch (urb->status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) 		case -EPIPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) 			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) 		/* software-driven interface shutdown */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) 		case -ECONNRESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) 		case -ESHUTDOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) 		case -EPROTO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) 		case -ETIME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) 		case -EILSEQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) 			netif_stop_queue(dev->net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) 			netif_dbg(dev, tx_err, dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) 				  "tx err %d\n", entry->urb->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) 	usb_autopm_put_interface_async(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) 	defer_bh(dev, skb, &dev->txq, tx_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) static void lan78xx_queue_skb(struct sk_buff_head *list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) 			      struct sk_buff *newsk, enum skb_state state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) 	struct skb_data *entry = (struct skb_data *)newsk->cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) 	__skb_queue_tail(list, newsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) 	entry->state = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) static netdev_tx_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) 	struct lan78xx_net *dev = netdev_priv(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) 	struct sk_buff *skb2 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) 	if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) 		skb_tx_timestamp(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) 		skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) 	if (skb2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) 		skb_queue_tail(&dev->txq_pend, skb2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) 		/* throttle TX patch at slower than SUPER SPEED USB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) 		if ((dev->udev->speed < USB_SPEED_SUPER) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) 		    (skb_queue_len(&dev->txq_pend) > 10))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) 			netif_stop_queue(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) 		netif_dbg(dev, tx_err, dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) 			  "lan78xx_tx_prep return NULL\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) 		dev->net->stats.tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) 		dev->net->stats.tx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) 	tasklet_schedule(&dev->bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) 	return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) 	struct lan78xx_priv *pdata = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) 	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) 	pdata = (struct lan78xx_priv *)(dev->data[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) 	if (!pdata) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) 		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) 	pdata->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) 	spin_lock_init(&pdata->rfe_ctl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) 	mutex_init(&pdata->dataport_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) 	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) 	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) 		pdata->vlan_table[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) 	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) 	dev->net->features = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) 	if (DEFAULT_TX_CSUM_ENABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) 		dev->net->features |= NETIF_F_HW_CSUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) 	if (DEFAULT_RX_CSUM_ENABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) 		dev->net->features |= NETIF_F_RXCSUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) 	if (DEFAULT_TSO_CSUM_ENABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) 		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) 	if (DEFAULT_VLAN_RX_OFFLOAD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) 	if (DEFAULT_VLAN_FILTER_ENABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) 	dev->net->hw_features = dev->net->features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) 	ret = lan78xx_setup_irq_domain(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) 		netdev_warn(dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) 			    "lan78xx_setup_irq_domain() failed : %d", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) 		goto out1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) 	dev->net->hard_header_len += TX_OVERHEAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) 	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) 	/* Init all registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) 	ret = lan78xx_reset(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) 		netdev_warn(dev->net, "Registers INIT FAILED....");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) 		goto out2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) 	ret = lan78xx_mdio_init(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) 		netdev_warn(dev->net, "MDIO INIT FAILED.....");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) 		goto out2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) 	dev->net->flags |= IFF_MULTICAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) 	pdata->wol = WAKE_MAGIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) out2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) 	lan78xx_remove_irq_domain(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) out1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) 	netdev_warn(dev->net, "Bind routine FAILED");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) 	cancel_work_sync(&pdata->set_multicast);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) 	cancel_work_sync(&pdata->set_vlan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) 	kfree(pdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) 	lan78xx_remove_irq_domain(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) 	lan78xx_remove_mdio(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) 	if (pdata) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) 		cancel_work_sync(&pdata->set_multicast);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) 		cancel_work_sync(&pdata->set_vlan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) 		netif_dbg(dev, ifdown, dev->net, "free pdata");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) 		kfree(pdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) 		pdata = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) 		dev->data[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) 				    struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) 				    u32 rx_cmd_a, u32 rx_cmd_b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) 	/* HW Checksum offload appears to be flawed if used when not stripping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) 	 * VLAN headers. Drop back to S/W checksums under these conditions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) 	if (!(dev->net->features & NETIF_F_RXCSUM) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) 	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) 	    ((rx_cmd_a & RX_CMD_A_FVTG_) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) 	     !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) 		skb->ip_summed = CHECKSUM_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) 		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) 		skb->ip_summed = CHECKSUM_COMPLETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) 				    struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) 				    u32 rx_cmd_a, u32 rx_cmd_b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) 	if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) 	    (rx_cmd_a & RX_CMD_A_FVTG_))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) 				       (rx_cmd_b & 0xffff));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) 	int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) 	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) 		skb_queue_tail(&dev->rxq_pause, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) 	dev->net->stats.rx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) 	dev->net->stats.rx_bytes += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) 	skb->protocol = eth_type_trans(skb, dev->net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) 	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) 		  skb->len + sizeof(struct ethhdr), skb->protocol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) 	memset(skb->cb, 0, sizeof(struct skb_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) 	if (skb_defer_rx_timestamp(skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) 	status = netif_rx(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) 	if (status != NET_RX_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) 		netif_dbg(dev, rx_err, dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) 			  "netif_rx status %d\n", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) 	if (skb->len < dev->net->hard_header_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) 	while (skb->len > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) 		u32 rx_cmd_a, rx_cmd_b, align_count, size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) 		u16 rx_cmd_c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) 		struct sk_buff *skb2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) 		unsigned char *packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) 		rx_cmd_a = get_unaligned_le32(skb->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) 		skb_pull(skb, sizeof(rx_cmd_a));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) 		rx_cmd_b = get_unaligned_le32(skb->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) 		skb_pull(skb, sizeof(rx_cmd_b));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) 		rx_cmd_c = get_unaligned_le16(skb->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) 		skb_pull(skb, sizeof(rx_cmd_c));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) 		packet = skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) 		/* get the packet length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) 		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) 		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) 		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) 			netif_dbg(dev, rx_err, dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) 				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) 			/* last frame in this batch */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) 			if (skb->len == size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) 				lan78xx_rx_csum_offload(dev, skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) 							rx_cmd_a, rx_cmd_b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) 				lan78xx_rx_vlan_offload(dev, skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) 							rx_cmd_a, rx_cmd_b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) 				skb_trim(skb, skb->len - 4); /* remove fcs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) 				skb->truesize = size + sizeof(struct sk_buff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) 				return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) 			skb2 = skb_clone(skb, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) 			if (unlikely(!skb2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) 				netdev_warn(dev->net, "Error allocating skb");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) 				return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) 			skb2->len = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) 			skb2->data = packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) 			skb_set_tail_pointer(skb2, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) 			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) 			lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) 			skb_trim(skb2, skb2->len - 4); /* remove fcs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) 			skb2->truesize = size + sizeof(struct sk_buff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) 			lan78xx_skb_return(dev, skb2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) 		skb_pull(skb, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) 		/* padding bytes before the next frame starts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) 		if (skb->len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) 			skb_pull(skb, align_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) 	if (!lan78xx_rx(dev, skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) 		dev->net->stats.rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) 	if (skb->len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) 		lan78xx_skb_return(dev, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) 	netif_dbg(dev, rx_err, dev->net, "drop\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) 	dev->net->stats.rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) 	skb_queue_tail(&dev->done, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) static void rx_complete(struct urb *urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) 	struct skb_data *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) 	unsigned long lockflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) 	size_t size = dev->rx_urb_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) 	skb = netdev_alloc_skb_ip_align(dev->net, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) 	if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) 		usb_free_urb(urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) 	entry = (struct skb_data *)skb->cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) 	entry->urb = urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) 	entry->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) 	entry->length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) 	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) 			  skb->data, size, rx_complete, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) 	spin_lock_irqsave(&dev->rxq.lock, lockflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) 	if (netif_device_present(dev->net) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) 	    netif_running(dev->net) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) 	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) 	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) 		ret = usb_submit_urb(urb, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) 		switch (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) 		case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) 			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) 		case -EPIPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) 			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) 		case -ENODEV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) 			netif_dbg(dev, ifdown, dev->net, "device gone\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) 			netif_device_detach(dev->net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) 		case -EHOSTUNREACH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) 			ret = -ENOLINK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) 			netif_dbg(dev, rx_err, dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) 				  "rx submit, %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) 			tasklet_schedule(&dev->bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) 		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) 		ret = -ENOLINK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) 	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) 		dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) 		usb_free_urb(urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) static void rx_complete(struct urb *urb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) 	struct sk_buff	*skb = (struct sk_buff *)urb->context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) 	struct skb_data	*entry = (struct skb_data *)skb->cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) 	struct lan78xx_net *dev = entry->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) 	int urb_status = urb->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) 	enum skb_state state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) 	skb_put(skb, urb->actual_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) 	state = rx_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) 	entry->urb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) 	switch (urb_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) 	case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) 		if (skb->len < dev->net->hard_header_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) 			state = rx_cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) 			dev->net->stats.rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) 			dev->net->stats.rx_length_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) 			netif_dbg(dev, rx_err, dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) 				  "rx length %d\n", skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) 		usb_mark_last_busy(dev->udev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) 	case -EPIPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) 		dev->net->stats.rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) 		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) 	case -ECONNRESET:				/* async unlink */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) 	case -ESHUTDOWN:				/* hardware gone */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) 		netif_dbg(dev, ifdown, dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) 			  "rx shutdown, code %d\n", urb_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) 		state = rx_cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) 		entry->urb = urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) 		urb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) 	case -EPROTO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) 	case -ETIME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) 	case -EILSEQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) 		dev->net->stats.rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) 		state = rx_cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) 		entry->urb = urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) 		urb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) 	/* data overrun ... flush fifo? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) 	case -EOVERFLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) 		dev->net->stats.rx_over_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) 		state = rx_cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) 		dev->net->stats.rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) 		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) 	state = defer_bh(dev, skb, &dev->rxq, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) 	if (urb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) 		if (netif_running(dev->net) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) 		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) 		    state != unlink_start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) 			rx_submit(dev, urb, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) 		usb_free_urb(urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) 	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) static void lan78xx_tx_bh(struct lan78xx_net *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) 	int length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) 	struct urb *urb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) 	struct skb_data *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) 	struct sk_buff_head *tqp = &dev->txq_pend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) 	struct sk_buff *skb, *skb2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) 	int count, pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) 	int skb_totallen, pkt_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) 	skb_totallen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) 	pkt_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) 	count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) 	length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) 	spin_lock_irqsave(&tqp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) 	skb_queue_walk(tqp, skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) 		if (skb_is_gso(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) 			if (!skb_queue_is_first(tqp, skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) 				/* handle previous packets first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) 			count = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) 			length = skb->len - TX_OVERHEAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) 			__skb_unlink(skb, tqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) 			spin_unlock_irqrestore(&tqp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) 			goto gso_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) 		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) 		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) 		pkt_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) 	spin_unlock_irqrestore(&tqp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) 	/* copy to a single skb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) 	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) 	if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) 		goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) 	skb_put(skb, skb_totallen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) 	for (count = pos = 0; count < pkt_cnt; count++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) 		skb2 = skb_dequeue(tqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) 		if (skb2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) 			length += (skb2->len - TX_OVERHEAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) 			memcpy(skb->data + pos, skb2->data, skb2->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) 			pos += roundup(skb2->len, sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) 			dev_kfree_skb(skb2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) gso_skb:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) 	urb = usb_alloc_urb(0, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) 	if (!urb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) 		goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) 	entry = (struct skb_data *)skb->cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) 	entry->urb = urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) 	entry->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) 	entry->length = length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) 	entry->num_of_packet = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) 	spin_lock_irqsave(&dev->txq.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) 	ret = usb_autopm_get_interface_async(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) 		spin_unlock_irqrestore(&dev->txq.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) 		goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) 	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) 			  skb->data, skb->len, tx_complete, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) 	if (length % dev->maxpacket == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) 		/* send USB_ZERO_PACKET */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) 		urb->transfer_flags |= URB_ZERO_PACKET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) #ifdef CONFIG_PM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) 	/* if this triggers the device is still a sleep */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) 		/* transmission will be done in resume */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) 		usb_anchor_urb(urb, &dev->deferred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) 		/* no use to process more packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) 		netif_stop_queue(dev->net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) 		usb_put_urb(urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) 		spin_unlock_irqrestore(&dev->txq.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) 		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) 	ret = usb_submit_urb(urb, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) 	switch (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) 	case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) 		netif_trans_update(dev->net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) 		lan78xx_queue_skb(&dev->txq, skb, tx_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) 		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) 			netif_stop_queue(dev->net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) 	case -EPIPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) 		netif_stop_queue(dev->net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) 		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) 		usb_autopm_put_interface_async(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) 		usb_autopm_put_interface_async(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) 		netif_dbg(dev, tx_err, dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) 			  "tx: submit urb err %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) 	spin_unlock_irqrestore(&dev->txq.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) 		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) drop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) 		dev->net->stats.tx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) 		if (skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) 			dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) 		usb_free_urb(urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) 		netif_dbg(dev, tx_queued, dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) 			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) static void lan78xx_rx_bh(struct lan78xx_net *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) 	struct urb *urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) 	if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) 		for (i = 0; i < 10; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) 			if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) 			urb = usb_alloc_urb(0, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) 			if (urb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) 				if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) 					return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) 		if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) 			tasklet_schedule(&dev->bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) 	if (skb_queue_len(&dev->txq) < dev->tx_qlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) 		netif_wake_queue(dev->net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) static void lan78xx_bh(unsigned long param)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) 	struct lan78xx_net *dev = (struct lan78xx_net *)param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) 	struct skb_data *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) 	while ((skb = skb_dequeue(&dev->done))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) 		entry = (struct skb_data *)(skb->cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) 		switch (entry->state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) 		case rx_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) 			entry->state = rx_cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) 			rx_process(dev, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) 		case tx_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) 			usb_free_urb(entry->urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) 			dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) 		case rx_cleanup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) 			usb_free_urb(entry->urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) 			dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) 			netdev_dbg(dev->net, "skb state %d\n", entry->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) 	if (netif_device_present(dev->net) && netif_running(dev->net)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) 		/* reset update timer delta */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) 		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) 			dev->delta = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) 			mod_timer(&dev->stat_monitor,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) 				  jiffies + STAT_UPDATE_TIMER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) 		if (!skb_queue_empty(&dev->txq_pend))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) 			lan78xx_tx_bh(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) 		if (!timer_pending(&dev->delay) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) 		    !test_bit(EVENT_RX_HALT, &dev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) 			lan78xx_rx_bh(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) static void lan78xx_delayedwork(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) 	int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) 	struct lan78xx_net *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) 	dev = container_of(work, struct lan78xx_net, wq.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) 	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) 		unlink_urbs(dev, &dev->txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) 		status = usb_autopm_get_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) 		if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) 			goto fail_pipe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) 		status = usb_clear_halt(dev->udev, dev->pipe_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) 		usb_autopm_put_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) 		if (status < 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) 		    status != -EPIPE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) 		    status != -ESHUTDOWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) 			if (netif_msg_tx_err(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) fail_pipe:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) 				netdev_err(dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) 					   "can't clear tx halt, status %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) 					   status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) 			clear_bit(EVENT_TX_HALT, &dev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) 			if (status != -ESHUTDOWN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) 				netif_wake_queue(dev->net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) 	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) 		unlink_urbs(dev, &dev->rxq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) 		status = usb_autopm_get_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) 		if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) 				goto fail_halt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) 		status = usb_clear_halt(dev->udev, dev->pipe_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) 		usb_autopm_put_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) 		if (status < 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) 		    status != -EPIPE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) 		    status != -ESHUTDOWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) 			if (netif_msg_rx_err(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) fail_halt:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) 				netdev_err(dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) 					   "can't clear rx halt, status %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) 					   status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) 			clear_bit(EVENT_RX_HALT, &dev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) 			tasklet_schedule(&dev->bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) 	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) 		int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) 		clear_bit(EVENT_LINK_RESET, &dev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) 		status = usb_autopm_get_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) 		if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) 			goto skip_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) 		if (lan78xx_link_reset(dev) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) 			usb_autopm_put_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) skip_reset:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) 			netdev_info(dev->net, "link reset failed (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) 				    ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) 			usb_autopm_put_interface(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) 	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) 		lan78xx_update_stats(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) 		clear_bit(EVENT_STAT_UPDATE, &dev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) 		mod_timer(&dev->stat_monitor,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) 			  jiffies + (STAT_UPDATE_TIMER * dev->delta));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) 		dev->delta = min((dev->delta * 2), 50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) static void intr_complete(struct urb *urb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) 	struct lan78xx_net *dev = urb->context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) 	int status = urb->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) 	switch (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) 	/* success */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) 	case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) 		lan78xx_status(dev, urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) 	/* software-driven interface shutdown */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) 	case -ENOENT:			/* urb killed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) 	case -ESHUTDOWN:		/* hardware gone */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) 		netif_dbg(dev, ifdown, dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) 			  "intr shutdown, code %d\n", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) 	/* NOTE:  not throttling like RX/TX, since this endpoint
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) 	 * already polls infrequently
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) 		netdev_dbg(dev->net, "intr status %d\n", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) 	if (!netif_running(dev->net))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) 	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) 	status = usb_submit_urb(urb, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) 	if (status != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) 		netif_err(dev, timer, dev->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) 			  "intr resubmit --> %d\n", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) static void lan78xx_disconnect(struct usb_interface *intf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) 	struct lan78xx_net *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) 	struct usb_device *udev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) 	struct net_device *net;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) 	struct phy_device *phydev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) 	dev = usb_get_intfdata(intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) 	usb_set_intfdata(intf, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) 	if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) 	udev = interface_to_usbdev(intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) 	net = dev->net;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) 	phydev = net->phydev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) 	phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) 	phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) 	phy_disconnect(net->phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) 	if (phy_is_pseudo_fixed_link(phydev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) 		fixed_phy_unregister(phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) 	unregister_netdev(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) 	cancel_delayed_work_sync(&dev->wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) 	usb_scuttle_anchored_urbs(&dev->deferred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) 	lan78xx_unbind(dev, intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) 	usb_kill_urb(dev->urb_intr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) 	usb_free_urb(dev->urb_intr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) 	free_netdev(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) 	usb_put_dev(udev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) 	struct lan78xx_net *dev = netdev_priv(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) 	unlink_urbs(dev, &dev->txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) 	tasklet_schedule(&dev->bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) 						struct net_device *netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) 						netdev_features_t features)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) 	if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) 		features &= ~NETIF_F_GSO_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) 	features = vlan_features_check(skb, features);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) 	features = vxlan_features_check(skb, features);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) 	return features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) static const struct net_device_ops lan78xx_netdev_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) 	.ndo_open		= lan78xx_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) 	.ndo_stop		= lan78xx_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) 	.ndo_start_xmit		= lan78xx_start_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) 	.ndo_tx_timeout		= lan78xx_tx_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) 	.ndo_change_mtu		= lan78xx_change_mtu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) 	.ndo_set_mac_address	= lan78xx_set_mac_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) 	.ndo_validate_addr	= eth_validate_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) 	.ndo_do_ioctl		= phy_do_ioctl_running,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) 	.ndo_set_rx_mode	= lan78xx_set_multicast,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) 	.ndo_set_features	= lan78xx_set_features,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) 	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) 	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) 	.ndo_features_check	= lan78xx_features_check,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) static void lan78xx_stat_monitor(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) 	struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) 	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) static int lan78xx_probe(struct usb_interface *intf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) 			 const struct usb_device_id *id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) 	struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) 	struct lan78xx_net *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) 	struct net_device *netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) 	struct usb_device *udev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) 	unsigned maxp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) 	unsigned period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) 	u8 *buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) 	udev = interface_to_usbdev(intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) 	udev = usb_get_dev(udev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) 	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) 	if (!netdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) 		dev_err(&intf->dev, "Error: OOM\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) 		goto out1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) 	/* netdev_printk() needs this */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) 	SET_NETDEV_DEV(netdev, &intf->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) 	dev = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) 	dev->udev = udev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) 	dev->intf = intf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) 	dev->net = netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) 	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) 					| NETIF_MSG_PROBE | NETIF_MSG_LINK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) 	skb_queue_head_init(&dev->rxq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) 	skb_queue_head_init(&dev->txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) 	skb_queue_head_init(&dev->done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) 	skb_queue_head_init(&dev->rxq_pause);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) 	skb_queue_head_init(&dev->txq_pend);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) 	mutex_init(&dev->phy_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) 	tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) 	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) 	init_usb_anchor(&dev->deferred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) 	netdev->netdev_ops = &lan78xx_netdev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) 	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) 	netdev->ethtool_ops = &lan78xx_ethtool_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) 	dev->delta = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) 	timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) 	mutex_init(&dev->stats.access_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) 	if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) 		ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) 		goto out2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) 	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) 	ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) 	if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) 		ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) 		goto out2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) 	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) 	ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) 	if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) 		ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) 		goto out2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) 	ep_intr = &intf->cur_altsetting->endpoint[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) 	if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) 		ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) 		goto out2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) 	dev->pipe_intr = usb_rcvintpipe(dev->udev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) 					usb_endpoint_num(&ep_intr->desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) 	ret = lan78xx_bind(dev, intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) 		goto out2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) 	if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) 		netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) 	/* MTU range: 68 - 9000 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) 	netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) 	netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) 	period = ep_intr->desc.bInterval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) 	maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) 	buf = kmalloc(maxp, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) 	if (buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) 		dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) 		if (!dev->urb_intr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) 			ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) 			kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) 			goto out3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) 			usb_fill_int_urb(dev->urb_intr, dev->udev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) 					 dev->pipe_intr, buf, maxp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) 					 intr_complete, dev, period);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) 			dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) 	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) 	/* Reject broken descriptors. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) 	if (dev->maxpacket == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) 		ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) 		goto out4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) 	/* driver requires remote-wakeup capability during autosuspend. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) 	intf->needs_remote_wakeup = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) 	ret = lan78xx_phy_init(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) 		goto out4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) 	ret = register_netdev(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) 	if (ret != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) 		netif_err(dev, probe, netdev, "couldn't register the device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) 		goto out5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) 	usb_set_intfdata(intf, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) 	ret = device_set_wakeup_enable(&udev->dev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) 	 /* Default delay of 2sec has more overhead than advantage.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) 	  * Set to 10sec as default.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) 	  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) 	pm_runtime_set_autosuspend_delay(&udev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) 					 DEFAULT_AUTOSUSPEND_DELAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) out5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) 	phy_disconnect(netdev->phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) out4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) 	usb_free_urb(dev->urb_intr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) out3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) 	lan78xx_unbind(dev, intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) out2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) 	free_netdev(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) out1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) 	usb_put_dev(udev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) 	const u16 crc16poly = 0x8005;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) 	u16 bit, crc, msb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) 	u8 data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) 	crc = 0xFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) 	for (i = 0; i < len; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) 		data = *buf++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) 		for (bit = 0; bit < 8; bit++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) 			msb = crc >> 15;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) 			crc <<= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) 			if (msb ^ (u16)(data & 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) 				crc ^= crc16poly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) 				crc |= (u16)0x0001U;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) 			data >>= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) 	return crc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) 	u32 buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) 	int mask_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) 	u16 crc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) 	u32 temp_wucsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) 	u32 temp_pmt_ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) 	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) 	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) 	const u8 arp_type[2] = { 0x08, 0x06 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) 	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) 	buf &= ~MAC_TX_TXEN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) 	ret = lan78xx_write_reg(dev, MAC_TX, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) 	buf &= ~MAC_RX_RXEN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) 	ret = lan78xx_write_reg(dev, WUCSR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) 	temp_wucsr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) 	temp_pmt_ctl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) 	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) 	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) 	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) 	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) 	mask_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) 	if (wol & WAKE_PHY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) 		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) 	if (wol & WAKE_MAGIC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) 		temp_wucsr |= WUCSR_MPEN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) 	if (wol & WAKE_BCAST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) 		temp_wucsr |= WUCSR_BCST_EN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) 	if (wol & WAKE_MCAST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) 		temp_wucsr |= WUCSR_WAKE_EN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) 		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) 		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) 					WUF_CFGX_EN_ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) 					WUF_CFGX_TYPE_MCAST_ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) 					(crc & WUF_CFGX_CRC16_MASK_));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) 		mask_index++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) 		/* for IPv6 Multicast */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) 		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) 					WUF_CFGX_EN_ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) 					WUF_CFGX_TYPE_MCAST_ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) 					(crc & WUF_CFGX_CRC16_MASK_));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) 		mask_index++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) 	if (wol & WAKE_UCAST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) 		temp_wucsr |= WUCSR_PFDA_EN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) 	if (wol & WAKE_ARP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) 		temp_wucsr |= WUCSR_WAKE_EN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) 		/* set WUF_CFG & WUF_MASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) 		 * for packettype (offset 12,13) = ARP (0x0806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) 		crc = lan78xx_wakeframe_crc16(arp_type, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) 					WUF_CFGX_EN_ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) 					WUF_CFGX_TYPE_ALL_ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) 					(crc & WUF_CFGX_CRC16_MASK_));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) 		mask_index++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) 	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) 	/* when multiple WOL bits are set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) 	if (hweight_long((unsigned long)wol) > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) 	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) 	/* clear WUPS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) 	buf |= PMT_CTL_WUPS_MASK_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) 	buf |= MAC_RX_RXEN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) 	struct lan78xx_net *dev = usb_get_intfdata(intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) 	u32 buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) 	if (!dev->suspend_count++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) 		spin_lock_irq(&dev->txq.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) 		/* don't autosuspend while transmitting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) 		if ((skb_queue_len(&dev->txq) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) 		     skb_queue_len(&dev->txq_pend)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) 			PMSG_IS_AUTO(message)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) 			spin_unlock_irq(&dev->txq.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) 			ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) 			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) 			spin_unlock_irq(&dev->txq.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) 		/* stop TX & RX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) 		ret = lan78xx_read_reg(dev, MAC_TX, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) 		buf &= ~MAC_TX_TXEN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) 		ret = lan78xx_write_reg(dev, MAC_TX, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) 		ret = lan78xx_read_reg(dev, MAC_RX, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) 		buf &= ~MAC_RX_RXEN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) 		/* empty out the rx and queues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) 		netif_device_detach(dev->net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) 		lan78xx_terminate_urbs(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) 		usb_kill_urb(dev->urb_intr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) 		/* reattach */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) 		netif_device_attach(dev->net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) 		del_timer(&dev->stat_monitor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) 		if (PMSG_IS_AUTO(message)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) 			/* auto suspend (selective suspend) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) 			ret = lan78xx_read_reg(dev, MAC_TX, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) 			buf &= ~MAC_TX_TXEN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) 			ret = lan78xx_write_reg(dev, MAC_TX, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) 			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) 			buf &= ~MAC_RX_RXEN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) 			ret = lan78xx_write_reg(dev, MAC_RX, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) 			ret = lan78xx_write_reg(dev, WUCSR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) 			ret = lan78xx_write_reg(dev, WUCSR2, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) 			ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) 			/* set goodframe wakeup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) 			ret = lan78xx_read_reg(dev, WUCSR, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) 			buf |= WUCSR_RFE_WAKE_EN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) 			buf |= WUCSR_STORE_WAKE_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) 			ret = lan78xx_write_reg(dev, WUCSR, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) 			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) 			buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) 			buf |= PMT_CTL_RES_CLR_WKP_STS_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) 			buf |= PMT_CTL_PHY_WAKE_EN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) 			buf |= PMT_CTL_WOL_EN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) 			buf &= ~PMT_CTL_SUS_MODE_MASK_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) 			buf |= PMT_CTL_SUS_MODE_3_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) 			ret = lan78xx_write_reg(dev, PMT_CTL, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) 			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) 			buf |= PMT_CTL_WUPS_MASK_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) 			ret = lan78xx_write_reg(dev, PMT_CTL, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) 			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) 			buf |= MAC_RX_RXEN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) 			ret = lan78xx_write_reg(dev, MAC_RX, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) 			lan78xx_set_suspend(dev, pdata->wol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) static int lan78xx_resume(struct usb_interface *intf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) 	struct lan78xx_net *dev = usb_get_intfdata(intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) 	struct urb *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) 	u32 buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) 	if (!timer_pending(&dev->stat_monitor)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) 		dev->delta = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) 		mod_timer(&dev->stat_monitor,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) 			  jiffies + STAT_UPDATE_TIMER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) 	if (!--dev->suspend_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) 		/* resume interrupt URBs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) 		if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) 				usb_submit_urb(dev->urb_intr, GFP_NOIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) 		spin_lock_irq(&dev->txq.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) 		while ((res = usb_get_from_anchor(&dev->deferred))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) 			skb = (struct sk_buff *)res->context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) 			ret = usb_submit_urb(res, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) 			if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) 				dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) 				usb_free_urb(res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) 				usb_autopm_put_interface_async(dev->intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) 				netif_trans_update(dev->net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) 				lan78xx_queue_skb(&dev->txq, skb, tx_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) 		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) 		spin_unlock_irq(&dev->txq.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) 		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) 			if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) 				netif_start_queue(dev->net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) 			tasklet_schedule(&dev->bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) 	ret = lan78xx_write_reg(dev, WUCSR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) 	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) 					     WUCSR2_ARP_RCD_ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) 					     WUCSR2_IPV6_TCPSYN_RCD_ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) 					     WUCSR2_IPV4_TCPSYN_RCD_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) 	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) 					    WUCSR_EEE_RX_WAKE_ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) 					    WUCSR_PFDA_FR_ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) 					    WUCSR_RFE_WAKE_FR_ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) 					    WUCSR_WUFR_ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) 					    WUCSR_MPR_ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) 					    WUCSR_BCST_FR_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) 	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) 	buf |= MAC_TX_TXEN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) 	ret = lan78xx_write_reg(dev, MAC_TX, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) static int lan78xx_reset_resume(struct usb_interface *intf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) 	struct lan78xx_net *dev = usb_get_intfdata(intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) 	lan78xx_reset(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) 	phy_start(dev->net->phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) 	return lan78xx_resume(intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) static const struct usb_device_id products[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) 	/* LAN7800 USB Gigabit Ethernet Device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) 	/* LAN7850 USB Gigabit Ethernet Device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) 	/* LAN7801 USB Gigabit Ethernet Device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) 	/* ATM2-AF USB Gigabit Ethernet Device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) 	USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) 	{},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) MODULE_DEVICE_TABLE(usb, products);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) static struct usb_driver lan78xx_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) 	.name			= DRIVER_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) 	.id_table		= products,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) 	.probe			= lan78xx_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) 	.disconnect		= lan78xx_disconnect,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) 	.suspend		= lan78xx_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) 	.resume			= lan78xx_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) 	.reset_resume		= lan78xx_reset_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) 	.supports_autosuspend	= 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) 	.disable_hub_initiated_lpm = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) module_usb_driver(lan78xx_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) MODULE_AUTHOR(DRIVER_AUTHOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) MODULE_DESCRIPTION(DRIVER_DESC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) MODULE_LICENSE("GPL");