Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5 / 5B / 5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * Faraday FTGMAC100 Gigabit Ethernet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  * (C) Copyright 2009-2011 Faraday Technology
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  * Po-Yu Chuang <ratbert@faraday-tech.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) #define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #include <linux/clk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/etherdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/ethtool.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include <linux/netdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include <linux/of.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include <linux/of_mdio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include <linux/phy.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include <linux/property.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include <linux/crc32.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #include <linux/if_vlan.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #include <linux/of_net.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) #include <net/ip.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) #include <net/ncsi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) #include "ftgmac100.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) 
#define DRV_NAME	"ftgmac100"

/* Arbitrary values, I am not sure the HW has limits */
#define MAX_RX_QUEUE_ENTRIES	1024
#define MAX_TX_QUEUE_ENTRIES	1024
#define MIN_RX_QUEUE_ENTRIES	32
#define MIN_TX_QUEUE_ENTRIES	32

/* Defaults */
#define DEF_RX_QUEUE_ENTRIES	128
#define DEF_TX_QUEUE_ENTRIES	128

/* Largest frame the driver will accept into a single RX buffer */
#define MAX_PKT_SIZE		1536
#define RX_BUF_SIZE		MAX_PKT_SIZE	/* must be smaller than 0x3fff */

/* Min number of tx ring entries before stopping queue */
#define TX_THRESHOLD		(MAX_SKB_FRAGS + 1)

/* Reference clock rates, in Hz */
#define FTGMAC_100MHZ		100000000
#define FTGMAC_25MHZ		25000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) 
/* Per-device driver state, allocated alongside the net_device. */
struct ftgmac100 {
	/* Registers */
	struct resource *res;
	void __iomem *base;

	/* Rx ring */
	unsigned int rx_q_entries;	/* ring size; wrap logic masks with (size - 1), so presumably a power of two */
	struct ftgmac100_rxdes *rxdes;	/* descriptor array (CPU view) */
	dma_addr_t rxdes_dma;		/* descriptor array (device view) */
	struct sk_buff **rx_skbs;	/* skb per descriptor; NULL when scratch page is mapped */
	unsigned int rx_pointer;	/* next descriptor to examine */
	u32 rxdes0_edorr_mask;		/* end-of-ring bit (chip-variant specific) */

	/* Tx ring */
	unsigned int tx_q_entries;
	struct ftgmac100_txdes *txdes;
	dma_addr_t txdes_dma;
	struct sk_buff **tx_skbs;
	unsigned int tx_clean_pointer;	/* next descriptor to reclaim */
	unsigned int tx_pointer;	/* next descriptor to fill */
	u32 txdes0_edotr_mask;		/* end-of-ring bit (chip-variant specific) */

	/* Used to signal the reset task of ring change request */
	unsigned int new_rx_q_entries;
	unsigned int new_tx_q_entries;

	/* Scratch page to use when rx skb alloc fails */
	void *rx_scratch;
	dma_addr_t rx_scratch_dma;

	/* Component structures */
	struct net_device *netdev;
	struct device *dev;
	struct ncsi_dev *ndev;
	struct napi_struct napi;
	struct work_struct reset_task;
	struct mii_bus *mii_bus;
	struct clk *clk;

	/* AST2500/AST2600 RMII ref clock gate */
	struct clk *rclk;

	/* Link management */
	int cur_speed;			/* SPEED_10/100/1000, or 0 for no link */
	int cur_duplex;
	bool use_ncsi;

	/* Multicast filter settings */
	u32 maht0;
	u32 maht1;

	/* Flow control settings */
	bool tx_pause;
	bool rx_pause;
	bool aneg_pause;

	/* Misc */
	bool need_mac_restart;
	bool is_aspeed;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) static int ftgmac100_reset_mac(struct ftgmac100 *priv, u32 maccr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) 	struct net_device *netdev = priv->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) 	/* NOTE: reset clears all registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120) 	iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121) 	iowrite32(maccr | FTGMAC100_MACCR_SW_RST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122) 		  priv->base + FTGMAC100_OFFSET_MACCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123) 	for (i = 0; i < 200; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124) 		unsigned int maccr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126) 		maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127) 		if (!(maccr & FTGMAC100_MACCR_SW_RST))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130) 		udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  131) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  133) 	netdev_err(netdev, "Hardware reset failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134) 	return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137) static int ftgmac100_reset_and_config_mac(struct ftgmac100 *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139) 	u32 maccr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) 	switch (priv->cur_speed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) 	case SPEED_10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) 	case 0: /* no link */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146) 	case SPEED_100:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) 		maccr |= FTGMAC100_MACCR_FAST_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150) 	case SPEED_1000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) 		maccr |= FTGMAC100_MACCR_GIGA_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154) 		netdev_err(priv->netdev, "Unknown speed %d !\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155) 			   priv->cur_speed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159) 	/* (Re)initialize the queue pointers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  160) 	priv->rx_pointer = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  161) 	priv->tx_clean_pointer = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) 	priv->tx_pointer = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164) 	/* The doc says reset twice with 10us interval */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165) 	if (ftgmac100_reset_mac(priv, maccr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) 	usleep_range(10, 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168) 	return ftgmac100_reset_mac(priv, maccr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) static void ftgmac100_write_mac_addr(struct ftgmac100 *priv, const u8 *mac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) 	unsigned int maddr = mac[0] << 8 | mac[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174) 	unsigned int laddr = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) 	iowrite32(maddr, priv->base + FTGMAC100_OFFSET_MAC_MADR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) 	iowrite32(laddr, priv->base + FTGMAC100_OFFSET_MAC_LADR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180) static void ftgmac100_initial_mac(struct ftgmac100 *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182) 	u8 mac[ETH_ALEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183) 	unsigned int m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) 	unsigned int l;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185) 	void *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187) 	addr = device_get_mac_address(priv->dev, mac, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) 	if (addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189) 		ether_addr_copy(priv->netdev->dev_addr, mac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190) 		dev_info(priv->dev, "Read MAC address %pM from device tree\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) 			 mac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195) 	m = ioread32(priv->base + FTGMAC100_OFFSET_MAC_MADR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196) 	l = ioread32(priv->base + FTGMAC100_OFFSET_MAC_LADR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) 	mac[0] = (m >> 8) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) 	mac[1] = m & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) 	mac[2] = (l >> 24) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) 	mac[3] = (l >> 16) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) 	mac[4] = (l >> 8) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) 	mac[5] = l & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) 	if (is_valid_ether_addr(mac)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) 		ether_addr_copy(priv->netdev->dev_addr, mac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) 		dev_info(priv->dev, "Read MAC address %pM from chip\n", mac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) 		eth_hw_addr_random(priv->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) 		dev_info(priv->dev, "Generated random MAC address %pM\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) 			 priv->netdev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) static int ftgmac100_set_mac_addr(struct net_device *dev, void *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219) 	ret = eth_prepare_mac_addr_change(dev, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) 	eth_commit_mac_addr_change(dev, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) 	ftgmac100_write_mac_addr(netdev_priv(dev), dev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) static void ftgmac100_config_pause(struct ftgmac100 *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) 	u32 fcr = FTGMAC100_FCR_PAUSE_TIME(16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) 	/* Throttle tx queue when receiving pause frames */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) 	if (priv->rx_pause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) 		fcr |= FTGMAC100_FCR_FC_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) 	/* Enables sending pause frames when the RX queue is past a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) 	 * certain threshold.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) 	if (priv->tx_pause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) 		fcr |= FTGMAC100_FCR_FCTHR_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) 	iowrite32(fcr, priv->base + FTGMAC100_OFFSET_FCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) 
/* Program the static HW configuration after a reset: interrupt status,
 * ring base addresses, RX buffer size, autopoll, MAC address, multicast
 * hash, DMA burst/descriptor geometry, interrupt mitigation and FIFO
 * sizes. ftgmac100_start_hw() enables the MAC separately.
 */
static void ftgmac100_init_hw(struct ftgmac100 *priv)
{
	u32 reg, rfifo_sz, tfifo_sz;

	/* Clear stale interrupts (ISR is write-1-to-clear: write back
	 * whatever bits are currently set)
	 */
	reg = ioread32(priv->base + FTGMAC100_OFFSET_ISR);
	iowrite32(reg, priv->base + FTGMAC100_OFFSET_ISR);

	/* Setup RX ring buffer base */
	iowrite32(priv->rxdes_dma, priv->base + FTGMAC100_OFFSET_RXR_BADR);

	/* Setup TX ring buffer base */
	iowrite32(priv->txdes_dma, priv->base + FTGMAC100_OFFSET_NPTXR_BADR);

	/* Configure RX buffer size */
	iowrite32(FTGMAC100_RBSR_SIZE(RX_BUF_SIZE),
		  priv->base + FTGMAC100_OFFSET_RBSR);

	/* Set RX descriptor autopoll */
	iowrite32(FTGMAC100_APTC_RXPOLL_CNT(1),
		  priv->base + FTGMAC100_OFFSET_APTC);

	/* Write MAC address */
	ftgmac100_write_mac_addr(priv, priv->netdev->dev_addr);

	/* Write multicast filter (cached in priv by ftgmac100_calc_mc_hash) */
	iowrite32(priv->maht0, priv->base + FTGMAC100_OFFSET_MAHT0);
	iowrite32(priv->maht1, priv->base + FTGMAC100_OFFSET_MAHT1);

	/* Configure descriptor sizes and increase burst sizes according
	 * to values in Aspeed SDK. The FIFO arbitration is enabled and
	 * the thresholds set based on the recommended values in the
	 * AST2400 specification.
	 */
	iowrite32(FTGMAC100_DBLAC_RXDES_SIZE(2) |   /* 2*8 bytes RX descs */
		  FTGMAC100_DBLAC_TXDES_SIZE(2) |   /* 2*8 bytes TX descs */
		  FTGMAC100_DBLAC_RXBURST_SIZE(3) | /* 512 bytes max RX bursts */
		  FTGMAC100_DBLAC_TXBURST_SIZE(3) | /* 512 bytes max TX bursts */
		  FTGMAC100_DBLAC_RX_THR_EN |       /* Enable fifo threshold arb */
		  FTGMAC100_DBLAC_RXFIFO_HTHR(6) |  /* 6/8 of FIFO high threshold */
		  FTGMAC100_DBLAC_RXFIFO_LTHR(2),   /* 2/8 of FIFO low threshold */
		  priv->base + FTGMAC100_OFFSET_DBLAC);

	/* Interrupt mitigation configured for 1 interrupt/packet. HW interrupt
	 * mitigation doesn't seem to provide any benefit with NAPI so leave
	 * it at that.
	 */
	iowrite32(FTGMAC100_ITC_RXINT_THR(1) |
		  FTGMAC100_ITC_TXINT_THR(1),
		  priv->base + FTGMAC100_OFFSET_ITC);

	/* Configure FIFO sizes in the TPAFCR register: the RX/TX FIFO
	 * size codes come from the feature register (FEAR, bits [2:0]
	 * and [5:3]) and are copied into TPAFCR bits [26:24]/[29:27].
	 */
	reg = ioread32(priv->base + FTGMAC100_OFFSET_FEAR);
	rfifo_sz = reg & 0x00000007;
	tfifo_sz = (reg >> 3) & 0x00000007;
	reg = ioread32(priv->base + FTGMAC100_OFFSET_TPAFCR);
	reg &= ~0x3f000000;
	reg |= (tfifo_sz << 27);
	reg |= (rfifo_sz << 24);
	iowrite32(reg, priv->base + FTGMAC100_OFFSET_TPAFCR);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  308) static void ftgmac100_start_hw(struct ftgmac100 *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  309) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  310) 	u32 maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  312) 	/* Keep the original GMAC and FAST bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  313) 	maccr &= (FTGMAC100_MACCR_FAST_MODE | FTGMAC100_MACCR_GIGA_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  315) 	/* Add all the main enable bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  316) 	maccr |= FTGMAC100_MACCR_TXDMA_EN	|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  317) 		 FTGMAC100_MACCR_RXDMA_EN	|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318) 		 FTGMAC100_MACCR_TXMAC_EN	|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319) 		 FTGMAC100_MACCR_RXMAC_EN	|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  320) 		 FTGMAC100_MACCR_CRC_APD	|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  321) 		 FTGMAC100_MACCR_PHY_LINK_LEVEL	|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  322) 		 FTGMAC100_MACCR_RX_RUNT	|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  323) 		 FTGMAC100_MACCR_RX_BROADPKT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  325) 	/* Add other bits as needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  326) 	if (priv->cur_duplex == DUPLEX_FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  327) 		maccr |= FTGMAC100_MACCR_FULLDUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  328) 	if (priv->netdev->flags & IFF_PROMISC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  329) 		maccr |= FTGMAC100_MACCR_RX_ALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  330) 	if (priv->netdev->flags & IFF_ALLMULTI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  331) 		maccr |= FTGMAC100_MACCR_RX_MULTIPKT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  332) 	else if (netdev_mc_count(priv->netdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333) 		maccr |= FTGMAC100_MACCR_HT_MULTI_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  335) 	/* Vlan filtering enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336) 	if (priv->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337) 		maccr |= FTGMAC100_MACCR_RM_VLAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339) 	/* Hit the HW */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340) 	iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342) 
/* Disable the MAC by clearing every MACCR bit (all DMA/MAC enables off). */
static void ftgmac100_stop_hw(struct ftgmac100 *priv)
{
	iowrite32(0, priv->base + FTGMAC100_OFFSET_MACCR);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348) static void ftgmac100_calc_mc_hash(struct ftgmac100 *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350) 	struct netdev_hw_addr *ha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352) 	priv->maht1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  353) 	priv->maht0 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354) 	netdev_for_each_mc_addr(ha, priv->netdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355) 		u32 crc_val = ether_crc_le(ETH_ALEN, ha->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357) 		crc_val = (~(crc_val >> 2)) & 0x3f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  358) 		if (crc_val >= 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  359) 			priv->maht1 |= 1ul << (crc_val - 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  360) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  361) 			priv->maht0 |= 1ul << (crc_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  362) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  365) static void ftgmac100_set_rx_mode(struct net_device *netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  366) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367) 	struct ftgmac100 *priv = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369) 	/* Setup the hash filter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370) 	ftgmac100_calc_mc_hash(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372) 	/* Interface down ? that's all there is to do */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373) 	if (!netif_running(netdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376) 	/* Update the HW */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) 	iowrite32(priv->maht0, priv->base + FTGMAC100_OFFSET_MAHT0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) 	iowrite32(priv->maht1, priv->base + FTGMAC100_OFFSET_MAHT1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) 	/* Reconfigure MACCR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) 	ftgmac100_start_hw(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) 
/* Allocate and map a fresh RX skb for ring slot @entry, then re-arm
 * the descriptor @rxdes.
 *
 * On skb allocation or DMA mapping failure the descriptor is pointed
 * at the shared scratch page instead (so the HW can still land data
 * somewhere), the skb slot is left NULL, and -ENOMEM is returned.
 * Returns 0 on success.
 *
 * NOTE(review): the write ordering here matters — the buffer address
 * must be visible before the OWN bit is handed back to the HW; do not
 * reorder around the dma_wmb().
 */
static int ftgmac100_alloc_rx_buf(struct ftgmac100 *priv, unsigned int entry,
				  struct ftgmac100_rxdes *rxdes, gfp_t gfp)
{
	struct net_device *netdev = priv->netdev;
	struct sk_buff *skb;
	dma_addr_t map;
	int err = 0;

	skb = netdev_alloc_skb_ip_align(netdev, RX_BUF_SIZE);
	if (unlikely(!skb)) {
		if (net_ratelimit())
			netdev_warn(netdev, "failed to allocate rx skb\n");
		err = -ENOMEM;
		map = priv->rx_scratch_dma;
	} else {
		map = dma_map_single(priv->dev, skb->data, RX_BUF_SIZE,
				     DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(priv->dev, map))) {
			if (net_ratelimit())
				netdev_err(netdev, "failed to map rx page\n");
			dev_kfree_skb_any(skb);
			map = priv->rx_scratch_dma;
			skb = NULL;
			err = -ENOMEM;
		}
	}

	/* Store skb (NULL if we fell back to the scratch page) */
	priv->rx_skbs[entry] = skb;

	/* Store DMA address into RX desc */
	rxdes->rxdes3 = cpu_to_le32(map);

	/* Ensure the above is ordered vs clearing the OWN bit */
	dma_wmb();

	/* Clean status (which resets own bit); the last ring entry also
	 * carries the end-of-ring bit so the HW wraps back to entry 0.
	 */
	if (entry == (priv->rx_q_entries - 1))
		rxdes->rxdes0 = cpu_to_le32(priv->rxdes0_edorr_mask);
	else
		rxdes->rxdes0 = 0;

	return err;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) static unsigned int ftgmac100_next_rx_pointer(struct ftgmac100 *priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 					      unsigned int pointer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 	return (pointer + 1) & (priv->rx_q_entries - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) static void ftgmac100_rx_packet_error(struct ftgmac100 *priv, u32 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 	struct net_device *netdev = priv->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 	if (status & FTGMAC100_RXDES0_RX_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 		netdev->stats.rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 	if (status & FTGMAC100_RXDES0_CRC_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 		netdev->stats.rx_crc_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 	if (status & (FTGMAC100_RXDES0_FTL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 		      FTGMAC100_RXDES0_RUNT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 		      FTGMAC100_RXDES0_RX_ODD_NB))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 		netdev->stats.rx_length_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 	struct net_device *netdev = priv->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 	struct ftgmac100_rxdes *rxdes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 	unsigned int pointer, size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 	u32 status, csum_vlan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 	dma_addr_t map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 	/* Grab next RX descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 	pointer = priv->rx_pointer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 	rxdes = &priv->rxdes[pointer];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 	/* Grab descriptor status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 	status = le32_to_cpu(rxdes->rxdes0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 	/* Do we have a packet ? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 	if (!(status & FTGMAC100_RXDES0_RXPKT_RDY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 	/* Order subsequent reads with the test for the ready bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 	dma_rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 	/* We don't cope with fragmented RX packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 	if (unlikely(!(status & FTGMAC100_RXDES0_FRS) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 		     !(status & FTGMAC100_RXDES0_LRS)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 		goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 	/* Grab received size and csum vlan field in the descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 	size = status & FTGMAC100_RXDES0_VDBC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 	csum_vlan = le32_to_cpu(rxdes->rxdes1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 	/* Any error (other than csum offload) flagged ? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 	if (unlikely(status & RXDES0_ANY_ERROR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 		/* Correct for incorrect flagging of runt packets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 		 * with vlan tags... Just accept a runt packet that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 		 * has been flagged as vlan and whose size is at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 		 * least 60 bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 		if ((status & FTGMAC100_RXDES0_RUNT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 		    (csum_vlan & FTGMAC100_RXDES1_VLANTAG_AVAIL) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 		    (size >= 60))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 			status &= ~FTGMAC100_RXDES0_RUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 		/* Any error still in there ? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 		if (status & RXDES0_ANY_ERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 			ftgmac100_rx_packet_error(priv, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 			goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 	/* If the packet had no skb (failed to allocate earlier)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 	 * then try to allocate one and skip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 	skb = priv->rx_skbs[pointer];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 	if (!unlikely(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 		ftgmac100_alloc_rx_buf(priv, pointer, rxdes, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 		goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 	if (unlikely(status & FTGMAC100_RXDES0_MULTICAST))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 		netdev->stats.multicast++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 	/* If the HW found checksum errors, bounce it to software.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 	 * If we didn't, we need to see if the packet was recognized
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 	 * by HW as one of the supported checksummed protocols before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 	 * we accept the HW test results.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 	if (netdev->features & NETIF_F_RXCSUM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 		u32 err_bits = FTGMAC100_RXDES1_TCP_CHKSUM_ERR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 			FTGMAC100_RXDES1_UDP_CHKSUM_ERR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 			FTGMAC100_RXDES1_IP_CHKSUM_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 		if ((csum_vlan & err_bits) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 		    !(csum_vlan & FTGMAC100_RXDES1_PROT_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 			skb->ip_summed = CHECKSUM_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 			skb->ip_summed = CHECKSUM_UNNECESSARY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 	/* Transfer received size to skb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 	skb_put(skb, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 	/* Extract vlan tag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 	if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 	    (csum_vlan & FTGMAC100_RXDES1_VLANTAG_AVAIL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 				       csum_vlan & 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 	/* Tear down DMA mapping, do necessary cache management */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 	map = le32_to_cpu(rxdes->rxdes3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) #if defined(CONFIG_ARM) && !defined(CONFIG_ARM_DMA_USE_IOMMU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 	/* When we don't have an iommu, we can save cycles by not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 	 * invalidating the cache for the part of the packet that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 	 * wasn't received.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 	dma_unmap_single(priv->dev, map, size, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 	dma_unmap_single(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 	/* Resplenish rx ring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 	ftgmac100_alloc_rx_buf(priv, pointer, rxdes, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 	priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 	skb->protocol = eth_type_trans(skb, netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 	netdev->stats.rx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 	netdev->stats.rx_bytes += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 	/* push packet to protocol stack */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 	if (skb->ip_summed == CHECKSUM_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 		netif_receive_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 		napi_gro_receive(&priv->napi, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 	(*processed)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572)  drop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 	/* Clean rxdes0 (which resets own bit) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 	rxdes->rxdes0 = cpu_to_le32(status & priv->rxdes0_edorr_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 	priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 	netdev->stats.rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) static u32 ftgmac100_base_tx_ctlstat(struct ftgmac100 *priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 				     unsigned int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 	if (index == (priv->tx_q_entries - 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 		return priv->txdes0_edotr_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) static unsigned int ftgmac100_next_tx_pointer(struct ftgmac100 *priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 					      unsigned int pointer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 	return (pointer + 1) & (priv->tx_q_entries - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) static u32 ftgmac100_tx_buf_avail(struct ftgmac100 *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 	/* Returns the number of available slots in the TX queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 	 * This always leaves one free slot so we don't have to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 	 * worry about empty vs. full, and this simplifies the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 	 * test for ftgmac100_tx_buf_cleanable() below
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 	return (priv->tx_clean_pointer - priv->tx_pointer - 1) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 		(priv->tx_q_entries - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) static bool ftgmac100_tx_buf_cleanable(struct ftgmac100 *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 	return priv->tx_pointer != priv->tx_clean_pointer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) static void ftgmac100_free_tx_packet(struct ftgmac100 *priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 				     unsigned int pointer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 				     struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 				     struct ftgmac100_txdes *txdes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 				     u32 ctl_stat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 	dma_addr_t map = le32_to_cpu(txdes->txdes3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 	size_t len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 	if (ctl_stat & FTGMAC100_TXDES0_FTS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 		len = skb_headlen(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 		dma_unmap_single(priv->dev, map, len, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 		len = FTGMAC100_TXDES0_TXBUF_SIZE(ctl_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 		dma_unmap_page(priv->dev, map, len, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 	/* Free SKB on last segment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 	if (ctl_stat & FTGMAC100_TXDES0_LTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 		dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 	priv->tx_skbs[pointer] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) static bool ftgmac100_tx_complete_packet(struct ftgmac100 *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 	struct net_device *netdev = priv->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 	struct ftgmac100_txdes *txdes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 	unsigned int pointer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 	u32 ctl_stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 	pointer = priv->tx_clean_pointer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 	txdes = &priv->txdes[pointer];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 	ctl_stat = le32_to_cpu(txdes->txdes0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 	if (ctl_stat & FTGMAC100_TXDES0_TXDMA_OWN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 	skb = priv->tx_skbs[pointer];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 	netdev->stats.tx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 	netdev->stats.tx_bytes += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 	ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 	txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 	priv->tx_clean_pointer = ftgmac100_next_tx_pointer(priv, pointer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) static void ftgmac100_tx_complete(struct ftgmac100 *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 	struct net_device *netdev = priv->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 	/* Process all completed packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 	while (ftgmac100_tx_buf_cleanable(priv) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 	       ftgmac100_tx_complete_packet(priv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 		;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 	/* Restart queue if needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 	smp_mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 	if (unlikely(netif_queue_stopped(netdev) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 		     ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 		struct netdev_queue *txq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 		txq = netdev_get_tx_queue(netdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 		__netif_tx_lock(txq, smp_processor_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 		if (netif_queue_stopped(netdev) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 		    ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 			netif_wake_queue(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 		__netif_tx_unlock(txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) static bool ftgmac100_prep_tx_csum(struct sk_buff *skb, u32 *csum_vlan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 		u8 ip_proto = ip_hdr(skb)->protocol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 		*csum_vlan |= FTGMAC100_TXDES1_IP_CHKSUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 		switch(ip_proto) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 		case IPPROTO_TCP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 			*csum_vlan |= FTGMAC100_TXDES1_TCP_CHKSUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 		case IPPROTO_UDP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 			*csum_vlan |= FTGMAC100_TXDES1_UDP_CHKSUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 		case IPPROTO_IP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	return skb_checksum_help(skb) == 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 					     struct net_device *netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 	struct ftgmac100 *priv = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 	struct ftgmac100_txdes *txdes, *first;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 	unsigned int pointer, nfrags, len, i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 	u32 f_ctl_stat, ctl_stat, csum_vlan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 	dma_addr_t map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 	/* The HW doesn't pad small frames */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 	if (eth_skb_pad(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 		netdev->stats.tx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 		return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 	/* Reject oversize packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 	if (unlikely(skb->len > MAX_PKT_SIZE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 		if (net_ratelimit())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 			netdev_dbg(netdev, "tx packet too big\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 		goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 	/* Do we have a limit on #fragments ? I yet have to get a reply
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	 * from Aspeed. If there's one I haven't hit it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	nfrags = skb_shinfo(skb)->nr_frags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	/* Setup HW checksumming */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	csum_vlan = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	if (skb->ip_summed == CHECKSUM_PARTIAL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	    !ftgmac100_prep_tx_csum(skb, &csum_vlan))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 		goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 	/* Add VLAN tag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	if (skb_vlan_tag_present(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 		csum_vlan |= FTGMAC100_TXDES1_INS_VLANTAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 		csum_vlan |= skb_vlan_tag_get(skb) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	/* Get header len */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	len = skb_headlen(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	/* Map the packet head */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	map = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	if (dma_mapping_error(priv->dev, map)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 		if (net_ratelimit())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 			netdev_err(netdev, "map tx packet head failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 		goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	/* Grab the next free tx descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	pointer = priv->tx_pointer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	txdes = first = &priv->txdes[pointer];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	/* Setup it up with the packet head. Don't write the head to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	 * ring just yet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	priv->tx_skbs[pointer] = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	f_ctl_stat = ftgmac100_base_tx_ctlstat(priv, pointer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	f_ctl_stat |= FTGMAC100_TXDES0_TXDMA_OWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	f_ctl_stat |= FTGMAC100_TXDES0_TXBUF_SIZE(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	f_ctl_stat |= FTGMAC100_TXDES0_FTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	if (nfrags == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 		f_ctl_stat |= FTGMAC100_TXDES0_LTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	txdes->txdes3 = cpu_to_le32(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	txdes->txdes1 = cpu_to_le32(csum_vlan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	/* Next descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	pointer = ftgmac100_next_tx_pointer(priv, pointer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	/* Add the fragments */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	for (i = 0; i < nfrags; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 		len = skb_frag_size(frag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 		/* Map it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 		map = skb_frag_dma_map(priv->dev, frag, 0, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 				       DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 		if (dma_mapping_error(priv->dev, map))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 			goto dma_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 		/* Setup descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 		priv->tx_skbs[pointer] = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 		txdes = &priv->txdes[pointer];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 		ctl_stat = ftgmac100_base_tx_ctlstat(priv, pointer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 		ctl_stat |= FTGMAC100_TXDES0_TXDMA_OWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 		ctl_stat |= FTGMAC100_TXDES0_TXBUF_SIZE(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 		if (i == (nfrags - 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 			ctl_stat |= FTGMAC100_TXDES0_LTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 		txdes->txdes0 = cpu_to_le32(ctl_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 		txdes->txdes1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 		txdes->txdes3 = cpu_to_le32(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 		/* Next one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 		pointer = ftgmac100_next_tx_pointer(priv, pointer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	/* Order the previous packet and descriptor udpates
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	 * before setting the OWN bit on the first descriptor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	dma_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	first->txdes0 = cpu_to_le32(f_ctl_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	/* Update next TX pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	priv->tx_pointer = pointer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	/* If there isn't enough room for all the fragments of a new packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	 * in the TX ring, stop the queue. The sequence below is race free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	 * vs. a concurrent restart in ftgmac100_poll()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	if (unlikely(ftgmac100_tx_buf_avail(priv) < TX_THRESHOLD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 		netif_stop_queue(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 		/* Order the queue stop with the test below */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 		smp_mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 		if (ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 			netif_wake_queue(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	/* Poke transmitter to read the updated TX descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	iowrite32(1, priv->base + FTGMAC100_OFFSET_NPTXPD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829)  dma_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	if (net_ratelimit())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 		netdev_err(netdev, "map tx fragment failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	/* Free head */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	pointer = priv->tx_pointer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	ftgmac100_free_tx_packet(priv, pointer, skb, first, f_ctl_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	first->txdes0 = cpu_to_le32(f_ctl_stat & priv->txdes0_edotr_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	/* Then all fragments */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	for (j = 0; j < i; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 		pointer = ftgmac100_next_tx_pointer(priv, pointer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 		txdes = &priv->txdes[pointer];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 		ctl_stat = le32_to_cpu(txdes->txdes0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 		ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 		txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	/* This cannot be reached if we successfully mapped the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	 * last fragment, so we know ftgmac100_free_tx_packet()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	 * hasn't freed the skb yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851)  drop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	/* Drop the packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	netdev->stats.tx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) static void ftgmac100_free_buffers(struct ftgmac100 *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	/* Free all RX buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	for (i = 0; i < priv->rx_q_entries; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 		struct ftgmac100_rxdes *rxdes = &priv->rxdes[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 		struct sk_buff *skb = priv->rx_skbs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 		dma_addr_t map = le32_to_cpu(rxdes->rxdes3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 		if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 		priv->rx_skbs[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 		dma_unmap_single(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 		dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	/* Free all TX buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	for (i = 0; i < priv->tx_q_entries; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 		struct ftgmac100_txdes *txdes = &priv->txdes[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 		struct sk_buff *skb = priv->tx_skbs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 		if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 		ftgmac100_free_tx_packet(priv, i, skb, txdes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 					 le32_to_cpu(txdes->txdes0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) static void ftgmac100_free_rings(struct ftgmac100 *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	/* Free skb arrays */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	kfree(priv->rx_skbs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	kfree(priv->tx_skbs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	/* Free descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	if (priv->rxdes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		dma_free_coherent(priv->dev, MAX_RX_QUEUE_ENTRIES *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 				  sizeof(struct ftgmac100_rxdes),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 				  priv->rxdes, priv->rxdes_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	priv->rxdes = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	if (priv->txdes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 		dma_free_coherent(priv->dev, MAX_TX_QUEUE_ENTRIES *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 				  sizeof(struct ftgmac100_txdes),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 				  priv->txdes, priv->txdes_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	priv->txdes = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	/* Free scratch packet buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	if (priv->rx_scratch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 		dma_free_coherent(priv->dev, RX_BUF_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 				  priv->rx_scratch, priv->rx_scratch_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) static int ftgmac100_alloc_rings(struct ftgmac100 *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	/* Allocate skb arrays */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	priv->rx_skbs = kcalloc(MAX_RX_QUEUE_ENTRIES, sizeof(void *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 				GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	if (!priv->rx_skbs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	priv->tx_skbs = kcalloc(MAX_TX_QUEUE_ENTRIES, sizeof(void *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 				GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	if (!priv->tx_skbs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	/* Allocate descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	priv->rxdes = dma_alloc_coherent(priv->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 					 MAX_RX_QUEUE_ENTRIES * sizeof(struct ftgmac100_rxdes),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 					 &priv->rxdes_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	if (!priv->rxdes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	priv->txdes = dma_alloc_coherent(priv->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 					 MAX_TX_QUEUE_ENTRIES * sizeof(struct ftgmac100_txdes),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 					 &priv->txdes_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	if (!priv->txdes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	/* Allocate scratch packet buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	priv->rx_scratch = dma_alloc_coherent(priv->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 					      RX_BUF_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 					      &priv->rx_scratch_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 					      GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	if (!priv->rx_scratch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) static void ftgmac100_init_rings(struct ftgmac100 *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	struct ftgmac100_rxdes *rxdes = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	struct ftgmac100_txdes *txdes = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	/* Update entries counts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	priv->rx_q_entries = priv->new_rx_q_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	priv->tx_q_entries = priv->new_tx_q_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	if (WARN_ON(priv->rx_q_entries < MIN_RX_QUEUE_ENTRIES))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	/* Initialize RX ring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	for (i = 0; i < priv->rx_q_entries; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 		rxdes = &priv->rxdes[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 		rxdes->rxdes0 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 		rxdes->rxdes3 = cpu_to_le32(priv->rx_scratch_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	/* Mark the end of the ring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	rxdes->rxdes0 |= cpu_to_le32(priv->rxdes0_edorr_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	if (WARN_ON(priv->tx_q_entries < MIN_RX_QUEUE_ENTRIES))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	/* Initialize TX ring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	for (i = 0; i < priv->tx_q_entries; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		txdes = &priv->txdes[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 		txdes->txdes0 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	txdes->txdes0 |= cpu_to_le32(priv->txdes0_edotr_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) static int ftgmac100_alloc_rx_buffers(struct ftgmac100 *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	for (i = 0; i < priv->rx_q_entries; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 		struct ftgmac100_rxdes *rxdes = &priv->rxdes[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		if (ftgmac100_alloc_rx_buf(priv, i, rxdes, GFP_KERNEL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) static void ftgmac100_adjust_link(struct net_device *netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	struct ftgmac100 *priv = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	struct phy_device *phydev = netdev->phydev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	bool tx_pause, rx_pause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	int new_speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	/* We store "no link" as speed 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	if (!phydev->link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 		new_speed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 		new_speed = phydev->speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	/* Grab pause settings from PHY if configured to do so */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	if (priv->aneg_pause) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 		rx_pause = tx_pause = phydev->pause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 		if (phydev->asym_pause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 			tx_pause = !rx_pause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 		rx_pause = priv->rx_pause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 		tx_pause = priv->tx_pause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	/* Link hasn't changed, do nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	if (phydev->speed == priv->cur_speed &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	    phydev->duplex == priv->cur_duplex &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	    rx_pause == priv->rx_pause &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	    tx_pause == priv->tx_pause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	/* Print status if we have a link or we had one and just lost it,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	 * don't print otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	if (new_speed || priv->cur_speed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 		phy_print_status(phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	priv->cur_speed = new_speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	priv->cur_duplex = phydev->duplex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	priv->rx_pause = rx_pause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	priv->tx_pause = tx_pause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	/* Link is down, do nothing else */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	if (!new_speed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	/* Disable all interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	/* Reset the adapter asynchronously */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	schedule_work(&priv->reset_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) static int ftgmac100_mii_probe(struct ftgmac100 *priv, phy_interface_t intf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	struct net_device *netdev = priv->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	struct phy_device *phydev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	phydev = phy_find_first(priv->mii_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	if (!phydev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 		netdev_info(netdev, "%s: no PHY found\n", netdev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	phydev = phy_connect(netdev, phydev_name(phydev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 			     &ftgmac100_adjust_link, intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	if (IS_ERR(phydev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 		netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 		return PTR_ERR(phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	/* Indicate that we support PAUSE frames (see comment in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	 * Documentation/networking/phy.rst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	phy_support_asym_pause(phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	/* Display what we found */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	phy_attached_info(phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) static int ftgmac100_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	struct net_device *netdev = bus->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	struct ftgmac100 *priv = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	unsigned int phycr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	/* preserve MDC cycle threshold */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	phycr &= FTGMAC100_PHYCR_MDC_CYCTHR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	phycr |= FTGMAC100_PHYCR_PHYAD(phy_addr) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 		 FTGMAC100_PHYCR_REGAD(regnum) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 		 FTGMAC100_PHYCR_MIIRD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	iowrite32(phycr, priv->base + FTGMAC100_OFFSET_PHYCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	for (i = 0; i < 10; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 		phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 		if ((phycr & FTGMAC100_PHYCR_MIIRD) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 			int data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 			data = ioread32(priv->base + FTGMAC100_OFFSET_PHYDATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 			return FTGMAC100_PHYDATA_MIIRDATA(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 		udelay(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	netdev_err(netdev, "mdio read timed out\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) static int ftgmac100_mdiobus_write(struct mii_bus *bus, int phy_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 				   int regnum, u16 value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	struct net_device *netdev = bus->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	struct ftgmac100 *priv = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	unsigned int phycr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	int data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	/* preserve MDC cycle threshold */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	phycr &= FTGMAC100_PHYCR_MDC_CYCTHR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	phycr |= FTGMAC100_PHYCR_PHYAD(phy_addr) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 		 FTGMAC100_PHYCR_REGAD(regnum) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 		 FTGMAC100_PHYCR_MIIWR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	data = FTGMAC100_PHYDATA_MIIWDATA(value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	iowrite32(data, priv->base + FTGMAC100_OFFSET_PHYDATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	iowrite32(phycr, priv->base + FTGMAC100_OFFSET_PHYCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	for (i = 0; i < 10; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 		phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 		if ((phycr & FTGMAC100_PHYCR_MIIWR) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 		udelay(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	netdev_err(netdev, "mdio write timed out\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) static void ftgmac100_get_drvinfo(struct net_device *netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 				  struct ethtool_drvinfo *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) static void ftgmac100_get_ringparam(struct net_device *netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 				    struct ethtool_ringparam *ering)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	struct ftgmac100 *priv = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	memset(ering, 0, sizeof(*ering));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	ering->rx_max_pending = MAX_RX_QUEUE_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	ering->tx_max_pending = MAX_TX_QUEUE_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	ering->rx_pending = priv->rx_q_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	ering->tx_pending = priv->tx_q_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) static int ftgmac100_set_ringparam(struct net_device *netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 				   struct ethtool_ringparam *ering)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	struct ftgmac100 *priv = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	if (ering->rx_pending > MAX_RX_QUEUE_ENTRIES ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	    ering->tx_pending > MAX_TX_QUEUE_ENTRIES ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	    ering->rx_pending < MIN_RX_QUEUE_ENTRIES ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	    ering->tx_pending < MIN_TX_QUEUE_ENTRIES ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	    !is_power_of_2(ering->rx_pending) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	    !is_power_of_2(ering->tx_pending))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	priv->new_rx_q_entries = ering->rx_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	priv->new_tx_q_entries = ering->tx_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	if (netif_running(netdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 		schedule_work(&priv->reset_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) static void ftgmac100_get_pauseparam(struct net_device *netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 				     struct ethtool_pauseparam *pause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	struct ftgmac100 *priv = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	pause->autoneg = priv->aneg_pause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	pause->tx_pause = priv->tx_pause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	pause->rx_pause = priv->rx_pause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) static int ftgmac100_set_pauseparam(struct net_device *netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 				    struct ethtool_pauseparam *pause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	struct ftgmac100 *priv = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	struct phy_device *phydev = netdev->phydev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	priv->aneg_pause = pause->autoneg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	priv->tx_pause = pause->tx_pause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	priv->rx_pause = pause->rx_pause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	if (phydev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 		phy_set_asym_pause(phydev, pause->rx_pause, pause->tx_pause);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	if (netif_running(netdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 		if (!(phydev && priv->aneg_pause))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 			ftgmac100_config_pause(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) static const struct ethtool_ops ftgmac100_ethtool_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	.get_drvinfo		= ftgmac100_get_drvinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	.get_link		= ethtool_op_get_link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	.nway_reset		= phy_ethtool_nway_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	.get_ringparam		= ftgmac100_get_ringparam,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	.set_ringparam		= ftgmac100_set_ringparam,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	.get_pauseparam		= ftgmac100_get_pauseparam,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	.set_pauseparam		= ftgmac100_set_pauseparam,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) static irqreturn_t ftgmac100_interrupt(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	struct net_device *netdev = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	struct ftgmac100 *priv = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	unsigned int status, new_mask = FTGMAC100_INT_BAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	/* Fetch and clear interrupt bits, process abnormal ones */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	status = ioread32(priv->base + FTGMAC100_OFFSET_ISR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	iowrite32(status, priv->base + FTGMAC100_OFFSET_ISR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	if (unlikely(status & FTGMAC100_INT_BAD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 		/* RX buffer unavailable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 		if (status & FTGMAC100_INT_NO_RXBUF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 			netdev->stats.rx_over_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 		/* received packet lost due to RX FIFO full */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 		if (status & FTGMAC100_INT_RPKT_LOST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 			netdev->stats.rx_fifo_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 		/* sent packet lost due to excessive TX collision */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 		if (status & FTGMAC100_INT_XPKT_LOST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 			netdev->stats.tx_fifo_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 		/* AHB error -> Reset the chip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 		if (status & FTGMAC100_INT_AHB_ERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 			if (net_ratelimit())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 				netdev_warn(netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 					   "AHB bus error ! Resetting chip.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 			iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 			schedule_work(&priv->reset_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 			return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 		/* We may need to restart the MAC after such errors, delay
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 		 * this until after we have freed some Rx buffers though
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 		priv->need_mac_restart = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 		/* Disable those errors until we restart */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 		new_mask &= ~status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	/* Only enable "bad" interrupts while NAPI is on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	iowrite32(new_mask, priv->base + FTGMAC100_OFFSET_IER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	/* Schedule NAPI bh */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	napi_schedule_irqoff(&priv->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) static bool ftgmac100_check_rx(struct ftgmac100 *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	struct ftgmac100_rxdes *rxdes = &priv->rxdes[priv->rx_pointer];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	/* Do we have a packet ? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	return !!(rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RXPKT_RDY));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) static int ftgmac100_poll(struct napi_struct *napi, int budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	struct ftgmac100 *priv = container_of(napi, struct ftgmac100, napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	int work_done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	bool more;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	/* Handle TX completions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	if (ftgmac100_tx_buf_cleanable(priv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 		ftgmac100_tx_complete(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	/* Handle RX packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 		more = ftgmac100_rx_packet(priv, &work_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	} while (more && work_done < budget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	/* The interrupt is telling us to kick the MAC back to life
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	 * after an RX overflow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	if (unlikely(priv->need_mac_restart)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 		ftgmac100_start_hw(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 		priv->need_mac_restart = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 		/* Re-enable "bad" interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 		iowrite32(FTGMAC100_INT_BAD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 			  priv->base + FTGMAC100_OFFSET_IER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	/* As long as we are waiting for transmit packets to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	 * completed we keep NAPI going
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	if (ftgmac100_tx_buf_cleanable(priv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 		work_done = budget;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	if (work_done < budget) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 		/* We are about to re-enable all interrupts. However
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 		 * the HW has been latching RX/TX packet interrupts while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 		 * they were masked. So we clear them first, then we need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 		 * to re-check if there's something to process
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 		iowrite32(FTGMAC100_INT_RXTX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 			  priv->base + FTGMAC100_OFFSET_ISR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 		/* Push the above (and provides a barrier vs. subsequent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 		 * reads of the descriptor).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 		ioread32(priv->base + FTGMAC100_OFFSET_ISR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 		/* Check RX and TX descriptors for more work to do */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 		if (ftgmac100_check_rx(priv) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 		    ftgmac100_tx_buf_cleanable(priv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 			return budget;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 		/* deschedule NAPI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 		napi_complete(napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 		/* enable all interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 		iowrite32(FTGMAC100_INT_ALL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 			  priv->base + FTGMAC100_OFFSET_IER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	return work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) static int ftgmac100_init_all(struct ftgmac100 *priv, bool ignore_alloc_err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	/* Re-init descriptors (adjust queue sizes) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	ftgmac100_init_rings(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	/* Realloc rx descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	err = ftgmac100_alloc_rx_buffers(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	if (err && !ignore_alloc_err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	/* Reinit and restart HW */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	ftgmac100_init_hw(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	ftgmac100_config_pause(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	ftgmac100_start_hw(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	/* Re-enable the device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	napi_enable(&priv->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	netif_start_queue(priv->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	/* Enable all interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	iowrite32(FTGMAC100_INT_ALL, priv->base + FTGMAC100_OFFSET_IER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) static void ftgmac100_reset_task(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 	struct ftgmac100 *priv = container_of(work, struct ftgmac100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 					      reset_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 	struct net_device *netdev = priv->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	netdev_dbg(netdev, "Resetting NIC...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 	/* Lock the world */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	if (netdev->phydev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 		mutex_lock(&netdev->phydev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	if (priv->mii_bus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 		mutex_lock(&priv->mii_bus->mdio_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 	/* Check if the interface is still up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	if (!netif_running(netdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 		goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	/* Stop the network stack */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	netif_trans_update(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	napi_disable(&priv->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 	netif_tx_disable(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	/* Stop and reset the MAC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	ftgmac100_stop_hw(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 	err = ftgmac100_reset_and_config_mac(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 		/* Not much we can do ... it might come back... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 		netdev_err(netdev, "attempting to continue...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 	/* Free all rx and tx buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	ftgmac100_free_buffers(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 	/* Setup everything again and restart chip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 	ftgmac100_init_all(priv, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 	netdev_dbg(netdev, "Reset done !\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)  bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	if (priv->mii_bus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 		mutex_unlock(&priv->mii_bus->mdio_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	if (netdev->phydev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 		mutex_unlock(&netdev->phydev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) static int ftgmac100_open(struct net_device *netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	struct ftgmac100 *priv = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	/* Allocate ring buffers  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	err = ftgmac100_alloc_rings(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 		netdev_err(netdev, "Failed to allocate descriptors\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	/* When using NC-SI we force the speed to 100Mbit/s full duplex,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	 * Otherwise we leave it set to 0 (no link), the link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	 * message from the PHY layer will handle setting it up to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 	 * something else if needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	if (priv->use_ncsi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 		priv->cur_duplex = DUPLEX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 		priv->cur_speed = SPEED_100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 		priv->cur_duplex = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 		priv->cur_speed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	/* Reset the hardware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	err = ftgmac100_reset_and_config_mac(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 		goto err_hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	/* Initialize NAPI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	netif_napi_add(netdev, &priv->napi, ftgmac100_poll, 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	/* Grab our interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	err = request_irq(netdev->irq, ftgmac100_interrupt, 0, netdev->name, netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 		netdev_err(netdev, "failed to request irq %d\n", netdev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 		goto err_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	/* Start things up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 	err = ftgmac100_init_all(priv, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 		netdev_err(netdev, "Failed to allocate packet buffers\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 		goto err_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 	if (netdev->phydev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 		/* If we have a PHY, start polling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 		phy_start(netdev->phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	} else if (priv->use_ncsi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 		/* If using NC-SI, set our carrier on and start the stack */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 		netif_carrier_on(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 		/* Start the NCSI device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 		err = ncsi_start_dev(priv->ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 			goto err_ncsi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493)  err_ncsi:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	napi_disable(&priv->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	netif_stop_queue(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)  err_alloc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	ftgmac100_free_buffers(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 	free_irq(netdev->irq, netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499)  err_irq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 	netif_napi_del(&priv->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)  err_hw:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 	iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	ftgmac100_free_rings(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) static int ftgmac100_stop(struct net_device *netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	struct ftgmac100 *priv = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 	/* Note about the reset task: We are called with the rtnl lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	 * held, so we are synchronized against the core of the reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 	 * task. We must not try to synchronously cancel it otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	 * we can deadlock. But since it will test for netif_running()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 	 * which has already been cleared by the net core, we don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 	 * anything special to do.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	/* disable all interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 	netif_stop_queue(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	napi_disable(&priv->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 	netif_napi_del(&priv->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 	if (netdev->phydev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 		phy_stop(netdev->phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	else if (priv->use_ncsi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 		ncsi_stop_dev(priv->ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 	ftgmac100_stop_hw(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 	free_irq(netdev->irq, netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 	ftgmac100_free_buffers(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	ftgmac100_free_rings(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) static void ftgmac100_tx_timeout(struct net_device *netdev, unsigned int txqueue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 	struct ftgmac100 *priv = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	/* Disable all interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	/* Do the reset outside of interrupt context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 	schedule_work(&priv->reset_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) static int ftgmac100_set_features(struct net_device *netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 				  netdev_features_t features)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 	struct ftgmac100 *priv = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 	netdev_features_t changed = netdev->features ^ features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 	if (!netif_running(netdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	/* Update the vlan filtering bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 		u32 maccr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 		maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 		if (priv->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 			maccr |= FTGMAC100_MACCR_RM_VLAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 			maccr &= ~FTGMAC100_MACCR_RM_VLAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 		iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) #ifdef CONFIG_NET_POLL_CONTROLLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) static void ftgmac100_poll_controller(struct net_device *netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 	local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	ftgmac100_interrupt(netdev->irq, netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) static const struct net_device_ops ftgmac100_netdev_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	.ndo_open		= ftgmac100_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	.ndo_stop		= ftgmac100_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 	.ndo_start_xmit		= ftgmac100_hard_start_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 	.ndo_set_mac_address	= ftgmac100_set_mac_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 	.ndo_validate_addr	= eth_validate_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 	.ndo_do_ioctl		= phy_do_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 	.ndo_tx_timeout		= ftgmac100_tx_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 	.ndo_set_rx_mode	= ftgmac100_set_rx_mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 	.ndo_set_features	= ftgmac100_set_features,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) #ifdef CONFIG_NET_POLL_CONTROLLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 	.ndo_poll_controller	= ftgmac100_poll_controller,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 	.ndo_vlan_rx_add_vid	= ncsi_vlan_rx_add_vid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	.ndo_vlan_rx_kill_vid	= ncsi_vlan_rx_kill_vid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) static int ftgmac100_setup_mdio(struct net_device *netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	struct ftgmac100 *priv = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 	struct platform_device *pdev = to_platform_device(priv->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 	phy_interface_t phy_intf = PHY_INTERFACE_MODE_RGMII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 	struct device_node *np = pdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 	int i, err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	/* initialize mdio bus */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	priv->mii_bus = mdiobus_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 	if (!priv->mii_bus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 	if (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 	    of_device_is_compatible(np, "aspeed,ast2500-mac")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 		/* The AST2600 has a separate MDIO controller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 		/* For the AST2400 and AST2500 this driver only supports the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 		 * old MDIO interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 		reg = ioread32(priv->base + FTGMAC100_OFFSET_REVR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 		reg &= ~FTGMAC100_REVR_NEW_MDIO_INTERFACE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 		iowrite32(reg, priv->base + FTGMAC100_OFFSET_REVR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	/* Get PHY mode from device-tree */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	if (np) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 		/* Default to RGMII. It's a gigabit part after all */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 		err = of_get_phy_mode(np, &phy_intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 			phy_intf = PHY_INTERFACE_MODE_RGMII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 		/* Aspeed only supports these. I don't know about other IP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 		 * block vendors so I'm going to just let them through for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 		 * now. Note that this is only a warning if for some obscure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 		 * reason the DT really means to lie about it or it's a newer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 		 * part we don't know about.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 		 * On the Aspeed SoC there are additionally straps and SCU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 		 * control bits that could tell us what the interface is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 		 * (or allow us to configure it while the IP block is held
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 		 * in reset). For now I chose to keep this driver away from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 		 * those SoC specific bits and assume the device-tree is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 		 * right and the SCU has been configured properly by pinmux
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 		 * or the firmware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 		if (priv->is_aspeed &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 		    phy_intf != PHY_INTERFACE_MODE_RMII &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 		    phy_intf != PHY_INTERFACE_MODE_RGMII &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 		    phy_intf != PHY_INTERFACE_MODE_RGMII_ID &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 		    phy_intf != PHY_INTERFACE_MODE_RGMII_RXID &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 		    phy_intf != PHY_INTERFACE_MODE_RGMII_TXID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 			netdev_warn(netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 				   "Unsupported PHY mode %s !\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 				   phy_modes(phy_intf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 	priv->mii_bus->name = "ftgmac100_mdio";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 		 pdev->name, pdev->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 	priv->mii_bus->parent = priv->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 	priv->mii_bus->priv = priv->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 	priv->mii_bus->read = ftgmac100_mdiobus_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 	priv->mii_bus->write = ftgmac100_mdiobus_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	for (i = 0; i < PHY_MAX_ADDR; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 		priv->mii_bus->irq[i] = PHY_POLL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	err = mdiobus_register(priv->mii_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 		dev_err(priv->dev, "Cannot register MDIO bus!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 		goto err_register_mdiobus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 	err = ftgmac100_mii_probe(priv, phy_intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 		dev_err(priv->dev, "MII Probe failed!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 		goto err_mii_probe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) err_mii_probe:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 	mdiobus_unregister(priv->mii_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) err_register_mdiobus:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 	mdiobus_free(priv->mii_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) static void ftgmac100_destroy_mdio(struct net_device *netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 	struct ftgmac100 *priv = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 	if (!netdev->phydev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 	phy_disconnect(netdev->phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 	mdiobus_unregister(priv->mii_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 	mdiobus_free(priv->mii_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) static void ftgmac100_ncsi_handler(struct ncsi_dev *nd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 	if (unlikely(nd->state != ncsi_dev_state_functional))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 	netdev_dbg(nd->dev, "NCSI interface %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 		   nd->link_up ? "up" : "down");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) static int ftgmac100_setup_clk(struct ftgmac100 *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 	struct clk *clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	clk = devm_clk_get(priv->dev, NULL /* MACCLK */);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 	if (IS_ERR(clk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 		return PTR_ERR(clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 	priv->clk = clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 	rc = clk_prepare_enable(priv->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 	/* Aspeed specifies a 100MHz clock is required for up to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 	 * 1000Mbit link speeds. As NCSI is limited to 100Mbit, 25MHz
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 	 * is sufficient
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	rc = clk_set_rate(priv->clk, priv->use_ncsi ? FTGMAC_25MHZ :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 			  FTGMAC_100MHZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 		goto cleanup_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 	/* RCLK is for RMII, typically used for NCSI. Optional because it's not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 	 * necessary if it's the AST2400 MAC, or the MAC is configured for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 	 * RGMII, or the controller is not an ASPEED-based controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 	priv->rclk = devm_clk_get_optional(priv->dev, "RCLK");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 	rc = clk_prepare_enable(priv->rclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 	if (!rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) cleanup_clk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 	clk_disable_unprepare(priv->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) static int ftgmac100_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 	struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 	int irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 	struct net_device *netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 	struct ftgmac100 *priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 	struct device_node *np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 	if (!res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 	irq = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 	if (irq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 		return irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 	/* setup net_device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 	netdev = alloc_etherdev(sizeof(*priv));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 	if (!netdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 		goto err_alloc_etherdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 	SET_NETDEV_DEV(netdev, &pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 	netdev->ethtool_ops = &ftgmac100_ethtool_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 	netdev->netdev_ops = &ftgmac100_netdev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 	netdev->watchdog_timeo = 5 * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 	platform_set_drvdata(pdev, netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 	/* setup private data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 	priv = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 	priv->netdev = netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 	priv->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 	INIT_WORK(&priv->reset_task, ftgmac100_reset_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 	/* map io memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 	priv->res = request_mem_region(res->start, resource_size(res),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 				       dev_name(&pdev->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	if (!priv->res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 		dev_err(&pdev->dev, "Could not reserve memory region\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 		goto err_req_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 	priv->base = ioremap(res->start, resource_size(res));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 	if (!priv->base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 		dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 		err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 		goto err_ioremap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 	netdev->irq = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 	/* Enable pause */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 	priv->tx_pause = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 	priv->rx_pause = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 	priv->aneg_pause = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 	/* MAC address from chip or random one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 	ftgmac100_initial_mac(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 	np = pdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 	if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 		   of_device_is_compatible(np, "aspeed,ast2500-mac") ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 		   of_device_is_compatible(np, "aspeed,ast2600-mac"))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 		priv->rxdes0_edorr_mask = BIT(30);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 		priv->txdes0_edotr_mask = BIT(30);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 		priv->is_aspeed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 		/* Disable ast2600 problematic HW arbitration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 		if (of_device_is_compatible(np, "aspeed,ast2600-mac")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 			iowrite32(FTGMAC100_TM_DEFAULT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 				  priv->base + FTGMAC100_OFFSET_TM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 		priv->rxdes0_edorr_mask = BIT(15);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 		priv->txdes0_edotr_mask = BIT(15);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 	if (np && of_get_property(np, "use-ncsi", NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 		if (!IS_ENABLED(CONFIG_NET_NCSI)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 			dev_err(&pdev->dev, "NCSI stack not enabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 			err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 			goto err_ncsi_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 		dev_info(&pdev->dev, "Using NCSI interface\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 		priv->use_ncsi = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 		priv->ndev = ncsi_register_dev(netdev, ftgmac100_ncsi_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 		if (!priv->ndev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 			err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 			goto err_ncsi_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 	} else if (np && of_get_property(np, "phy-handle", NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 		struct phy_device *phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 		phy = of_phy_get_and_connect(priv->netdev, np,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 					     &ftgmac100_adjust_link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 		if (!phy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 			dev_err(&pdev->dev, "Failed to connect to phy\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 			err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 			goto err_setup_mdio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 		/* Indicate that we support PAUSE frames (see comment in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 		 * Documentation/networking/phy.rst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 		phy_support_asym_pause(phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 		/* Display what we found */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 		phy_attached_info(phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 	} else if (np && !of_get_child_by_name(np, "mdio")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 		/* Support legacy ASPEED devicetree descriptions that decribe a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 		 * MAC with an embedded MDIO controller but have no "mdio"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 		 * child node. Automatically scan the MDIO bus for available
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 		 * PHYs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 		priv->use_ncsi = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 		err = ftgmac100_setup_mdio(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 			goto err_setup_mdio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 	if (priv->is_aspeed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 		err = ftgmac100_setup_clk(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 			goto err_ncsi_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 	/* Default ring sizes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 	priv->rx_q_entries = priv->new_rx_q_entries = DEF_RX_QUEUE_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 	priv->tx_q_entries = priv->new_tx_q_entries = DEF_TX_QUEUE_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 	/* Base feature set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 	netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 		NETIF_F_GRO | NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 		NETIF_F_HW_VLAN_CTAG_TX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 	if (priv->use_ncsi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 	/* AST2400  doesn't have working HW checksum generation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 	if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac")))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 		netdev->hw_features &= ~NETIF_F_HW_CSUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 	if (np && of_get_property(np, "no-hw-checksum", NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 		netdev->hw_features &= ~(NETIF_F_HW_CSUM | NETIF_F_RXCSUM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 	netdev->features |= netdev->hw_features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 	/* register network device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 	err = register_netdev(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 		dev_err(&pdev->dev, "Failed to register netdev\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 		goto err_register_netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 	netdev_info(netdev, "irq %d, mapped at %p\n", netdev->irq, priv->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) err_register_netdev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 	clk_disable_unprepare(priv->rclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 	clk_disable_unprepare(priv->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) err_ncsi_dev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 	if (priv->ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 		ncsi_unregister_dev(priv->ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 	ftgmac100_destroy_mdio(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) err_setup_mdio:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 	iounmap(priv->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) err_ioremap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 	release_resource(priv->res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) err_req_mem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 	free_netdev(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) err_alloc_etherdev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) static int ftgmac100_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 	struct net_device *netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 	struct ftgmac100 *priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 	netdev = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 	priv = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 	if (priv->ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 		ncsi_unregister_dev(priv->ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 	unregister_netdev(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 	clk_disable_unprepare(priv->rclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 	clk_disable_unprepare(priv->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 	/* There's a small chance the reset task will have been re-queued,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 	 * during stop, make sure it's gone before we free the structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 	cancel_work_sync(&priv->reset_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 	ftgmac100_destroy_mdio(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 	iounmap(priv->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 	release_resource(priv->res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 	netif_napi_del(&priv->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 	free_netdev(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) static const struct of_device_id ftgmac100_of_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 	{ .compatible = "faraday,ftgmac100" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 	{ }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) MODULE_DEVICE_TABLE(of, ftgmac100_of_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) static struct platform_driver ftgmac100_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 	.probe	= ftgmac100_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 	.remove	= ftgmac100_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 	.driver	= {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 		.name		= DRV_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 		.of_match_table	= ftgmac100_of_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) module_platform_driver(ftgmac100_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) MODULE_DESCRIPTION("FTGMAC100 driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) MODULE_LICENSE("GPL");