Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-or-later
/* drivers/net/ethernet/freescale/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 *  Gianfar:  AKA Lambda Draconis, "Dragon"
 *  RA 11 31 24.2
 *  Dec +69 19 52
 *  V 3.84
 *  B-V +1.62
 *
 *  Theory of operation
 *
 *  The driver is initialized through of_device. Configuration information
 *  is therefore conveyed through an OF-style device tree.
 *
 *  The Gianfar Ethernet Controller uses a ring of buffer
 *  descriptors.  The beginning is indicated by a register
 *  pointing to the physical address of the start of the ring.
 *  The end is determined by a "wrap" bit being set in the
 *  last descriptor of the ring.
 *
 *  When a packet is received, the RXF bit in the
 *  IEVENT register is set, triggering an interrupt when the
 *  corresponding bit in the IMASK register is also set (if
 *  interrupt coalescing is active, then the interrupt may not
 *  happen immediately, but will wait until either a set number
 *  of frames or amount of time have passed).  In NAPI, the
 *  interrupt handler will signal there is work to be done, and
 *  exit. This method will start at the last known empty
 *  descriptor, and process every subsequent descriptor until there
 *  are none left with data (NAPI will stop after a set number of
 *  packets to give time to other tasks, but will eventually
 *  process all the packets).  The data arrives inside a
 *  pre-allocated skb, and so after the skb is passed up to the
 *  stack, a new skb must be allocated, and the address field in
 *  the buffer descriptor must be updated to indicate this new
 *  skb.
 *
 *  When the kernel requests that a packet be transmitted, the
 *  driver starts where it left off last time, and points the
 *  descriptor at the buffer which was passed in.  The driver
 *  then informs the DMA engine that there are packets ready to
 *  be transmitted.  Once the controller is finished transmitting
 *  the packet, an interrupt may be triggered (under the same
 *  conditions as for reception, but depending on the TXF bit).
 *  The driver then cleans up the buffer.
 */
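
/* As a rough sketch only (the real implementation lives in the RX cleanup
 * path further down in this file), the NAPI receive walk described above
 * amounts to:
 *
 *	bdp = first descriptor not yet processed;
 *	while (work_done < budget && !(bdp->lstatus & RXBD_EMPTY)) {
 *		hand the skb for this descriptor up to the stack;
 *		allocate a replacement skb and store its DMA address
 *		in bdp->bufPtr;
 *		mark the descriptor empty again (RXBD_EMPTY, plus
 *		RXBD_WRAP on the last descriptor of the ring);
 *		advance bdp, wrapping at the end of the ring;
 *		work_done++;
 *	}
 */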

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DEBUG

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/net_tstamp.h>

#include <asm/io.h>
#ifdef CONFIG_PPC
#include <asm/reg.h>
#include <asm/mpc85xx.h>
#endif
#include <asm/irq.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>

#include "gianfar.h"

#define TX_TIMEOUT      (5*HZ)

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = cpu_to_be32(buf);

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

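	/* Make sure the buffer pointer write above is visible before the
	 * descriptor is handed back to the controller via lstatus below.
	 */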
	gfar_wmb();

	bdp->lstatus = cpu_to_be32(lstatus);
}

static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}

static void gfar_init_rqprm(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->rqprm0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_ring_size |
			   (DEFAULT_RX_LFC_THR << FBTHR_SHIFT));
		baddr++;
	}
}

static void gfar_rx_offload_en(struct gfar_private *priv)
{
	/* set this when rx hw offload (TOE) functions are being used */
	priv->uses_rxfcb = 0;

	if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
		priv->uses_rxfcb = 1;

	if (priv->hwts_rx_en || priv->rx_filer_enable)
		priv->uses_rxfcb = 1;
}

static void gfar_mac_rx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;

	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
		/* Program the RIR0 reg with the required distribution */
		if (priv->poll_mode == GFAR_SQ_POLLING)
			gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
		else /* GFAR_MQ_POLLING */
			gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
	}

	/* Restore PROMISC mode */
	if (priv->ndev->flags & IFF_PROMISC)
		rctrl |= RCTRL_PROM;

	if (priv->ndev->features & NETIF_F_RXCSUM)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash)
		rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Enable HW time stamping if requested from user space */
	if (priv->hwts_rx_en)
		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;

	/* Clear the LFC bit */
	gfar_write(&regs->rctrl, rctrl);
	/* Init flow control threshold values */
	gfar_init_rqprm(priv);
	gfar_write(&regs->ptv, DEFAULT_LFC_PTVVAL);
	rctrl |= RCTRL_LFC;

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);
}

static void gfar_mac_tx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tctrl = 0;

	if (priv->ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	if (priv->prio_sched_en)
		tctrl |= TCTRL_TXSCHED_PRIO;
	else {
		tctrl |= TCTRL_TXSCHED_WRRS;
		gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
		gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
	}

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
		tctrl |= TCTRL_VLINS;

	gfar_write(&regs->tctrl, tctrl);
}

static void gfar_configure_coalescing(struct gfar_private *priv,
			       unsigned long tx_mask, unsigned long rx_mask)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;

	if (priv->mode == MQ_MG_MODE) {
		int i = 0;

		baddr = &regs->txic0;
		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->tx_queue[i]->txcoalescing))
				gfar_write(baddr + i, priv->tx_queue[i]->txic);
		}

		baddr = &regs->rxic0;
		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->rx_queue[i]->rxcoalescing))
				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
		}
	} else {
		/* Backward compatible case -- even if we enable
		 * multiple queues, there's only single reg to program
		 */
		gfar_write(&regs->txic, 0);
		if (likely(priv->tx_queue[0]->txcoalescing))
			gfar_write(&regs->txic, priv->tx_queue[0]->txic);

		gfar_write(&regs->rxic, 0);
		if (unlikely(priv->rx_queue[0]->rxcoalescing))
			gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
	}
}

static void gfar_configure_coalescing_all(struct gfar_private *priv)
{
	gfar_configure_coalescing(priv, 0xFF, 0xFF);
}

static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
	unsigned long tx_packets = 0, tx_bytes = 0;
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_packets += priv->rx_queue[i]->stats.rx_packets;
		rx_bytes   += priv->rx_queue[i]->stats.rx_bytes;
		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
	}

	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_bytes   = rx_bytes;
	dev->stats.rx_dropped = rx_dropped;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
		tx_packets += priv->tx_queue[i]->stats.tx_packets;
	}

	dev->stats.tx_bytes   = tx_bytes;
	dev->stats.tx_packets = tx_packets;

	return &dev->stats;
}

/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 * do a CRC on it (little endian), and reverse the bits of the
 * result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 * table.  The table is controlled through 8 32-bit registers:
 * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
 * entry 255.  This means that the 3 most significant bits in the
 * hash index which gaddr register to use, and the 5 other bits
 * indicate which bit (assuming an IBM numbering scheme, which
 * for PowerPC (tm) is usually the case) in the register holds
 * the entry.
 */
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(ETH_ALEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31-whichbit));

	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);
}
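
/* Worked example with a hypothetical CRC value, assuming an 8-bit
 * hash_width: if the eight most significant bits of result were
 * 0xb3 (binary 10110011), then whichreg = 101b = 5 and
 * whichbit = 10011b = 19, so value = 1 << (31 - 19) = 1 << 12 is
 * OR'ed into hash_regs[5] -- i.e. hash table entry
 * 5 * 32 + 19 = 179 = 0xb3 is set, matching the 8-bit index.
 */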

/* There are multiple MAC Address register pairs on some controllers
 * This function sets the numth pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	u32 __iomem *macptr = &regs->macstnaddr1;

	macptr += num*2;

	/* For a station address of 0x12345678ABCD in transmission
	 * order (BE), MACnADDR1 is set to 0xCDAB7856 and
	 * MACnADDR2 is set to 0x34120000.
	 */
	tempval = (addr[5] << 24) | (addr[4] << 16) |
		  (addr[3] << 8)  |  addr[2];

	gfar_write(macptr, tempval);

	tempval = (addr[1] << 24) | (addr[0] << 16);

	gfar_write(macptr+1, tempval);
}

static int gfar_set_mac_addr(struct net_device *dev, void *p)
{
	int ret;

	ret = eth_mac_addr(dev, p);
	if (ret)
		return ret;

	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}

static void gfar_ints_disable(struct gfar_private *priv)
{
	int i;
	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;
		/* Clear IEVENT */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

		/* Initialize IMASK */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}
}

static void gfar_ints_enable(struct gfar_private *priv)
{
	int i;
	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;
		/* Unmask the interrupts we look for */
		gfar_write(&regs->imask, IMASK_DEFAULT);
	}
}

static int gfar_alloc_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
					    GFP_KERNEL);
		if (!priv->tx_queue[i])
			return -ENOMEM;

		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = priv->ndev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}
	return 0;
}

static int gfar_alloc_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
					    GFP_KERNEL);
		if (!priv->rx_queue[i])
			return -ENOMEM;

		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->ndev = priv->ndev;
	}
	return 0;
}

static void gfar_free_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void gfar_free_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void free_gfar_dev(struct gfar_private *priv)
{
	int i, j;

	for (i = 0; i < priv->num_grps; i++)
		for (j = 0; j < GFAR_NUM_IRQS; j++) {
			kfree(priv->gfargrp[i].irqinfo[j]);
			priv->gfargrp[i].irqinfo[j] = NULL;
		}

	free_netdev(priv->ndev);
}

static void disable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		napi_disable(&priv->gfargrp[i].napi_rx);
		napi_disable(&priv->gfargrp[i].napi_tx);
	}
}

static void enable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		napi_enable(&priv->gfargrp[i].napi_rx);
		napi_enable(&priv->gfargrp[i].napi_tx);
	}
}

static int gfar_parse_group(struct device_node *np,
			    struct gfar_private *priv, const char *model)
{
	struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
	int i;

	for (i = 0; i < GFAR_NUM_IRQS; i++) {
		grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
					  GFP_KERNEL);
		if (!grp->irqinfo[i])
			return -ENOMEM;
	}

	grp->regs = of_iomap(np, 0);
	if (!grp->regs)
		return -ENOMEM;

	gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
		gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
		if (!gfar_irq(grp, TX)->irq ||
		    !gfar_irq(grp, RX)->irq ||
		    !gfar_irq(grp, ER)->irq)
			return -EINVAL;
	}

	grp->priv = priv;
	spin_lock_init(&grp->grplock);
	if (priv->mode == MQ_MG_MODE) {
		u32 rxq_mask, txq_mask;
		int ret;

		grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
		grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);

		ret = of_property_read_u32(np, "fsl,rx-bit-map", &rxq_mask);
		if (!ret) {
			grp->rx_bit_map = rxq_mask ?
			rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
		}

		ret = of_property_read_u32(np, "fsl,tx-bit-map", &txq_mask);
		if (!ret) {
			grp->tx_bit_map = txq_mask ?
			txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
		}

		if (priv->poll_mode == GFAR_SQ_POLLING) {
			/* One Q per interrupt group: Q0 to G0, Q1 to G1 */
			grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
			grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
		}
	} else {
		grp->rx_bit_map = 0xFF;
		grp->tx_bit_map = 0xFF;
	}

	/* bit_map's MSB is q0 (from q0 to q7) but, for_each_set_bit parses
	 * right to left, so we need to revert the 8 bits to get the q index
	 */
	grp->rx_bit_map = bitrev8(grp->rx_bit_map);
	grp->tx_bit_map = bitrev8(grp->tx_bit_map);

	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
	 * also assign queues to groups
	 */
	for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
		if (!grp->rx_queue)
			grp->rx_queue = priv->rx_queue[i];
		grp->num_rx_queues++;
		grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
		priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
		priv->rx_queue[i]->grp = grp;
	}

	for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
		if (!grp->tx_queue)
			grp->tx_queue = priv->tx_queue[i];
		grp->num_tx_queues++;
		grp->tstat |= (TSTAT_CLEAR_THALT >> i);
		priv->tqueue |= (TQUEUE_EN0 >> i);
		priv->tx_queue[i]->grp = grp;
	}

	priv->num_grps++;

	return 0;
}

static int gfar_of_group_count(struct device_node *np)
{
	struct device_node *child;
	int num = 0;

	for_each_available_child_of_node(np, child)
		if (of_node_name_eq(child, "queue-group"))
			num++;

	return num;
}
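
/* For illustration only (node names and addresses made up): an
 * "fsl,etsec2" controller is typically described with one child node
 * per interrupt group, e.g.
 *
 *	ethernet@b0000 {
 *		compatible = "fsl,etsec2";
 *		queue-group@b0000 { ... };
 *		queue-group@b4000 { ... };
 *	};
 *
 * gfar_of_group_count() above simply counts the available
 * "queue-group" children of such a node.
 */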
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) /* Reads the controller's registers to determine what interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598)  * connects it to the PHY.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) static phy_interface_t gfar_get_interface(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 	struct gfar_private *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 	u32 ecntrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 	ecntrl = gfar_read(&regs->ecntrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 	if (ecntrl & ECNTRL_SGMII_MODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 		return PHY_INTERFACE_MODE_SGMII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 	if (ecntrl & ECNTRL_TBI_MODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 		if (ecntrl & ECNTRL_REDUCED_MODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 			return PHY_INTERFACE_MODE_RTBI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 			return PHY_INTERFACE_MODE_TBI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 	if (ecntrl & ECNTRL_REDUCED_MODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 		if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 			return PHY_INTERFACE_MODE_RMII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 		else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 			phy_interface_t interface = priv->interface;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 			/* This isn't autodetected right now, so it must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 			 * be set by the device tree or platform code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 				return PHY_INTERFACE_MODE_RGMII_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 			return PHY_INTERFACE_MODE_RGMII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 		return PHY_INTERFACE_MODE_GMII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 	return PHY_INTERFACE_MODE_MII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 	const char *model;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 	const void *mac_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 	int err = 0, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 	phy_interface_t interface;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 	struct net_device *dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 	struct gfar_private *priv = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 	struct device_node *np = ofdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 	struct device_node *child = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 	u32 stash_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 	u32 stash_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 	unsigned int num_tx_qs, num_rx_qs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 	unsigned short mode, poll_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 	if (!np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 	if (of_device_is_compatible(np, "fsl,etsec2")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 		mode = MQ_MG_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 		poll_mode = GFAR_SQ_POLLING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 		mode = SQ_SG_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 		poll_mode = GFAR_SQ_POLLING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 	if (mode == SQ_SG_MODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 		num_tx_qs = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 		num_rx_qs = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 	} else { /* MQ_MG_MODE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 		/* get the actual number of supported groups */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 		unsigned int num_grps = gfar_of_group_count(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 		if (num_grps == 0 || num_grps > MAXGROUPS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 			dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 				num_grps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 			pr_err("Cannot do alloc_etherdev, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 		if (poll_mode == GFAR_SQ_POLLING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 			num_tx_qs = num_grps; /* one txq per int group */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 			num_rx_qs = num_grps; /* one rxq per int group */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 		} else { /* GFAR_MQ_POLLING */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 			u32 tx_queues, rx_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 			int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 			/* parse the num of HW tx and rx queues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 			ret = of_property_read_u32(np, "fsl,num_tx_queues",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 						   &tx_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 			num_tx_qs = ret ? 1 : tx_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 			ret = of_property_read_u32(np, "fsl,num_rx_queues",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 						   &rx_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 			num_rx_qs = ret ? 1 : rx_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	if (num_tx_qs > MAX_TX_QS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 		       num_tx_qs, MAX_TX_QS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 		pr_err("Cannot do alloc_etherdev, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 	if (num_rx_qs > MAX_RX_QS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 		       num_rx_qs, MAX_RX_QS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 		pr_err("Cannot do alloc_etherdev, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 	dev = *pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 	if (NULL == dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 	priv->ndev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 	priv->mode = mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 	priv->poll_mode = poll_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 	priv->num_tx_queues = num_tx_qs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	netif_set_real_num_rx_queues(dev, num_rx_qs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	priv->num_rx_queues = num_rx_qs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	err = gfar_alloc_tx_queues(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 		goto tx_alloc_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	err = gfar_alloc_rx_queues(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 		goto rx_alloc_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	err = of_property_read_string(np, "model", &model);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 		pr_err("Device model property missing, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 		goto rx_alloc_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	/* Init Rx queue filer rule set linked list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	INIT_LIST_HEAD(&priv->rx_list.list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	priv->rx_list.count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	mutex_init(&priv->rx_queue_access);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	for (i = 0; i < MAXGROUPS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 		priv->gfargrp[i].regs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	/* Parse and initialize group specific information */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	if (priv->mode == MQ_MG_MODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 		for_each_available_child_of_node(np, child) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 			if (!of_node_name_eq(child, "queue-group"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 			err = gfar_parse_group(child, priv, model);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 			if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 				of_node_put(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 				goto err_grp_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	} else { /* SQ_SG_MODE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 		err = gfar_parse_group(np, priv, model);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 			goto err_grp_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	if (of_property_read_bool(np, "bd-stash")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 		priv->bd_stash_en = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	err = of_property_read_u32(np, "rx-stash-len", &stash_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	if (err == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 		priv->rx_stash_size = stash_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	err = of_property_read_u32(np, "rx-stash-idx", &stash_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	if (err == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 		priv->rx_stash_index = stash_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	if (stash_len || stash_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	mac_addr = of_get_mac_address(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	if (!IS_ERR(mac_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 		ether_addr_copy(dev->dev_addr, mac_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 		eth_hw_addr_random(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 		dev_info(&ofdev->dev, "Using random MAC address: %pM\n", dev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	if (model && !strcasecmp(model, "TSEC"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 				     FSL_GIANFAR_DEV_HAS_COALESCE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 				     FSL_GIANFAR_DEV_HAS_RMON |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 				     FSL_GIANFAR_DEV_HAS_MULTI_INTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	if (model && !strcasecmp(model, "eTSEC"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 				     FSL_GIANFAR_DEV_HAS_COALESCE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 				     FSL_GIANFAR_DEV_HAS_RMON |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 				     FSL_GIANFAR_DEV_HAS_MULTI_INTR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 				     FSL_GIANFAR_DEV_HAS_CSUM |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 				     FSL_GIANFAR_DEV_HAS_VLAN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 				     FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 				     FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 				     FSL_GIANFAR_DEV_HAS_TIMER |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 				     FSL_GIANFAR_DEV_HAS_RX_FILER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	/* Use the PHY connection type from the DT node if one is specified
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	 * there. rgmii-id really needs to be specified. Other types can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	 * detected by hardware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	err = of_get_phy_mode(np, &interface);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 		priv->interface = interface;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 		priv->interface = gfar_get_interface(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	if (of_find_property(np, "fsl,magic-packet", NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	if (of_get_property(np, "fsl,wake-on-filer", NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	/* In the case of a fixed PHY, the DT node associated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	 * with the PHY is the Ethernet MAC DT node.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	if (!priv->phy_node && of_phy_is_fixed_link(np)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 		err = of_phy_register_fixed_link(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 			goto err_grp_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 		priv->phy_node = of_node_get(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	/* Find the TBI PHY.  If it's not there, we don't support SGMII */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) err_grp_init:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	unmap_group_regs(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) rx_alloc_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	gfar_free_rx_queues(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) tx_alloc_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	gfar_free_tx_queues(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	free_gfar_dev(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 
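/* Write one four-entry cluster of filer rules for @class, filling the
 * filer table backwards from @rqfar and mirroring each entry in the
 * ftp_rqfcr/ftp_rqfpr shadow arrays.  Returns the index of the last
 * entry written, so the caller can keep filling downwards.
 */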
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 				   u32 class)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	u32 rqfpr = FPR_FILER_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	u32 rqfcr = 0x0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	rqfar--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	priv->ftp_rqfpr[rqfar] = rqfpr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	priv->ftp_rqfcr[rqfar] = rqfcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	rqfar--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	rqfcr = RQFCR_CMP_NOMATCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	priv->ftp_rqfpr[rqfar] = rqfpr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	priv->ftp_rqfcr[rqfar] = rqfcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	rqfar--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	rqfpr = class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	priv->ftp_rqfcr[rqfar] = rqfcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	priv->ftp_rqfpr[rqfar] = rqfpr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	rqfar--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	rqfpr = class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	priv->ftp_rqfcr[rqfar] = rqfcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	priv->ftp_rqfpr[rqfar] = rqfpr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	return rqfar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 
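/* Populate the hardware filer: a default match rule at MAX_FILER_IDX,
 * per-class clusters for IPv6/IPv4 with and without UDP/TCP, and
 * no-match rules for every remaining lower index.
 */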
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) static void gfar_init_filer_table(struct gfar_private *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	int i = 0x0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	u32 rqfar = MAX_FILER_IDX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	u32 rqfcr = 0x0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	u32 rqfpr = FPR_FILER_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	/* Default rule */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	rqfcr = RQFCR_CMP_MATCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	priv->ftp_rqfcr[rqfar] = rqfcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	priv->ftp_rqfpr[rqfar] = rqfpr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	/* cur_filer_idx indicates the first non-masked rule */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	priv->cur_filer_idx = rqfar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	/* Rest are masked rules */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	rqfcr = RQFCR_CMP_NOMATCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	for (i = 0; i < rqfar; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 		priv->ftp_rqfcr[i] = rqfcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 		priv->ftp_rqfpr[i] = rqfpr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		gfar_write_filer(priv, i, rqfcr, rqfpr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) #ifdef CONFIG_PPC
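/* Flag errata workarounds for e300-core (83xx) parts, identified by
 * their PVR/SVR values.
 */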
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) static void __gfar_detect_errata_83xx(struct gfar_private *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	unsigned int pvr = mfspr(SPRN_PVR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	unsigned int svr = mfspr(SPRN_SVR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	unsigned int rev = svr & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	/* MPC8313 Rev 2.0 and higher; All MPC837x */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 		priv->errata |= GFAR_ERRATA_74;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	/* MPC8313 and MPC837x all rev */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	if ((pvr == 0x80850010 && mod == 0x80b0) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 		priv->errata |= GFAR_ERRATA_76;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	/* MPC8313 Rev < 2.0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 		priv->errata |= GFAR_ERRATA_12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) static void __gfar_detect_errata_85xx(struct gfar_private *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	unsigned int svr = mfspr(SPRN_SVR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 		priv->errata |= GFAR_ERRATA_12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	/* P2020/P1010 Rev 1; MPC8548 Rev 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	    ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	    ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) < 0x31)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 		priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) static void gfar_detect_errata(struct gfar_private *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	struct device *dev = &priv->ofdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	/* no plans to fix */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	priv->errata |= GFAR_ERRATA_A002;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) #ifdef CONFIG_PPC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 		__gfar_detect_errata_85xx(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	else /* non-mpc85xx parts, i.e. e300 core based */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		__gfar_detect_errata_83xx(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	if (priv->errata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 			 priv->errata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 
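/* Choose the group-address hash layout: with the extended-hash feature
 * a 9-bit hash indexes all sixteen igaddr/gaddr registers, otherwise an
 * 8-bit hash indexes the eight gaddr registers only.
 */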
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) static void gfar_init_addr_hash_table(struct gfar_private *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 		priv->extended_hash = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 		priv->hash_width = 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 		priv->hash_regs[0] = &regs->igaddr0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		priv->hash_regs[1] = &regs->igaddr1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 		priv->hash_regs[2] = &regs->igaddr2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 		priv->hash_regs[3] = &regs->igaddr3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 		priv->hash_regs[4] = &regs->igaddr4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 		priv->hash_regs[5] = &regs->igaddr5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 		priv->hash_regs[6] = &regs->igaddr6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 		priv->hash_regs[7] = &regs->igaddr7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 		priv->hash_regs[8] = &regs->gaddr0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 		priv->hash_regs[9] = &regs->gaddr1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 		priv->hash_regs[10] = &regs->gaddr2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 		priv->hash_regs[11] = &regs->gaddr3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 		priv->hash_regs[12] = &regs->gaddr4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 		priv->hash_regs[13] = &regs->gaddr5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 		priv->hash_regs[14] = &regs->gaddr6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 		priv->hash_regs[15] = &regs->gaddr7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 		priv->extended_hash = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 		priv->hash_width = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 		priv->hash_regs[0] = &regs->gaddr0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 		priv->hash_regs[1] = &regs->gaddr1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 		priv->hash_regs[2] = &regs->gaddr2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 		priv->hash_regs[3] = &regs->gaddr3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 		priv->hash_regs[4] = &regs->gaddr4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 		priv->hash_regs[5] = &regs->gaddr5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 		priv->hash_regs[6] = &regs->gaddr6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 		priv->hash_regs[7] = &regs->gaddr7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) static int __gfar_is_rx_idle(struct gfar_private *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	u32 res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	/* Normally TSEC should not hang on GRS commands, so we should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	 * actually wait for the IEVENT_GRSC flag.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	/* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	 * and the Rx can be safely reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	res &= 0x7f807f80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	if ((res & 0xffff) == (res >> 16))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) /* Halt the receive and transmit queues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) static void gfar_halt_nodisable(struct gfar_private *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	u32 tempval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	unsigned int timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	int stopped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	gfar_ints_disable(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	if (gfar_is_dma_stopped(priv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	/* Stop the DMA, and wait for it to stop */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	tempval = gfar_read(&regs->dmactrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	tempval |= (DMACTRL_GRS | DMACTRL_GTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	gfar_write(&regs->dmactrl, tempval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	timeout = 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 		cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 		timeout--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	if (!timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 		stopped = gfar_is_dma_stopped(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	if (!stopped && !gfar_is_rx_dma_stopped(priv) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	    !__gfar_is_rx_idle(priv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 		goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) /* Halt the receive and transmit queues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) static void gfar_halt(struct gfar_private *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	u32 tempval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	/* Disable the Rx/Tx hw queues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	gfar_write(&regs->rqueue, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 	gfar_write(&regs->tqueue, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	mdelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	gfar_halt_nodisable(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	/* Disable Rx/Tx DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	tempval = gfar_read(&regs->maccfg1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	gfar_write(&regs->maccfg1, tempval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 
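/* Unmap and free every skb still left on the Tx ring, including the
 * buffers of all fragment descriptors, then release the tx_skbuff
 * array itself.
 */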
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	struct txbd8 *txbdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	struct gfar_private *priv = netdev_priv(tx_queue->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	txbdp = tx_queue->tx_bd_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	for (i = 0; i < tx_queue->tx_ring_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 		if (!tx_queue->tx_skbuff[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 		dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 				 be16_to_cpu(txbdp->length), DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 		txbdp->lstatus = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 		     j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 			txbdp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 			dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 				       be16_to_cpu(txbdp->length),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 				       DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 		txbdp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 		tx_queue->tx_skbuff[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	kfree(tx_queue->tx_skbuff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	tx_queue->tx_skbuff = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 
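/* Drop any partially assembled Rx skb, clear the descriptors, unmap
 * and free the pages backing the ring, then release the rx_buff array.
 */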
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	struct rxbd8 *rxbdp = rx_queue->rx_bd_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	dev_kfree_skb(rx_queue->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	for (i = 0; i < rx_queue->rx_ring_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 		struct	gfar_rx_buff *rxb = &rx_queue->rx_buff[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 		rxbdp->lstatus = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 		rxbdp->bufPtr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 		rxbdp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 		if (!rxb->page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 		dma_unmap_page(rx_queue->dev, rxb->dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 			       PAGE_SIZE, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 		__free_page(rxb->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 		rxb->page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	kfree(rx_queue->rx_buff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	rx_queue->rx_buff = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) /* If there are any tx skbs or rx skbs still around, free them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)  * Then free tx_skbuff and rx_skbuff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) static void free_skb_resources(struct gfar_private *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	struct gfar_priv_tx_q *tx_queue = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	struct gfar_priv_rx_q *rx_queue = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	/* Go through all the buffer descriptors and free their data buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	for (i = 0; i < priv->num_tx_queues; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 		struct netdev_queue *txq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 		tx_queue = priv->tx_queue[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 		txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 		if (tx_queue->tx_skbuff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 			free_skb_tx_queue(tx_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 		netdev_tx_reset_queue(txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	for (i = 0; i < priv->num_rx_queues; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 		rx_queue = priv->rx_queue[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 		if (rx_queue->rx_buff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 			free_skb_rx_queue(rx_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	dma_free_coherent(priv->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 			  sizeof(struct txbd8) * priv->total_tx_ring_size +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 			  sizeof(struct rxbd8) * priv->total_rx_ring_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 			  priv->tx_queue[0]->tx_bd_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 			  priv->tx_queue[0]->tx_bd_dma_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) void stop_gfar(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	struct gfar_private *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	netif_tx_stop_all_queues(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	smp_mb__before_atomic();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	set_bit(GFAR_DOWN, &priv->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	smp_mb__after_atomic();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	disable_napi(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	/* disable ints and gracefully shut down Rx/Tx DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	gfar_halt(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	phy_stop(dev->phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	free_skb_resources(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 
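/* Re-enable the hardware queues, take Rx/Tx DMA out of graceful stop,
 * clear the per-group halt bits so polling resumes, and unmask the
 * interrupts again.
 */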
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) static void gfar_start(struct gfar_private *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	u32 tempval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	/* Enable Rx/Tx hw queues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	gfar_write(&regs->rqueue, priv->rqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	gfar_write(&regs->tqueue, priv->tqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	/* Initialize DMACTRL to have WWR and WOP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	tempval = gfar_read(&regs->dmactrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	tempval |= DMACTRL_INIT_SETTINGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	gfar_write(&regs->dmactrl, tempval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	/* Make sure we aren't stopped */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	tempval = gfar_read(&regs->dmactrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	gfar_write(&regs->dmactrl, tempval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	for (i = 0; i < priv->num_grps; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 		regs = priv->gfargrp[i].regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 		/* Clear THLT/RHLT, so that the DMA starts polling now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	/* Enable Rx/Tx DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	tempval = gfar_read(&regs->maccfg1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	gfar_write(&regs->maccfg1, tempval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	gfar_ints_enable(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	netif_trans_update(priv->ndev); /* prevent tx timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 
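/* Allocate and DMA-map a fresh page for an Rx buffer slot; returns
 * false if either the page allocation or the mapping fails.
 */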
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	dma_addr_t addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	page = dev_alloc_page();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	if (unlikely(!page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	if (unlikely(dma_mapping_error(rxq->dev, addr))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 		__free_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	rxb->dma = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	rxb->page = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	rxb->page_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	struct gfar_private *priv = netdev_priv(rx_queue->ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	struct gfar_extra_stats *estats = &priv->extra_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	atomic64_inc(&estats->rx_alloc_err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 				int alloc_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	struct rxbd8 *bdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	struct gfar_rx_buff *rxb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	i = rx_queue->next_to_use;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	bdp = &rx_queue->rx_bd_base[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	rxb = &rx_queue->rx_buff[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	while (alloc_cnt--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 		/* try to reuse the page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 		if (unlikely(!rxb->page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 			if (unlikely(!gfar_new_page(rx_queue, rxb))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 				gfar_rx_alloc_err(rx_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 		/* Setup the new RxBD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 		gfar_init_rxbdp(rx_queue, bdp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 				rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 		/* Update to the next pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 		bdp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 		rxb++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 		if (unlikely(++i == rx_queue->rx_ring_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 			i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 			bdp = rx_queue->rx_bd_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 			rxb = rx_queue->rx_buff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	rx_queue->next_to_use = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	rx_queue->next_to_alloc = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 
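/* Reset every Tx ring to an empty state and mark the wrap bit on its
 * last descriptor, then refill each Rx ring with mapped buffers and
 * assign it its free buffer pointer (rfbptr) register.
 */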
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) static void gfar_init_bds(struct net_device *ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	struct gfar_private *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	struct gfar_priv_tx_q *tx_queue = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	struct gfar_priv_rx_q *rx_queue = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	struct txbd8 *txbdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	u32 __iomem *rfbptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 	int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 	for (i = 0; i < priv->num_tx_queues; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 		tx_queue = priv->tx_queue[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 		/* Initialize some variables in our dev structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 		tx_queue->dirty_tx = tx_queue->tx_bd_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 		tx_queue->cur_tx = tx_queue->tx_bd_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 		tx_queue->skb_curtx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 		tx_queue->skb_dirtytx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 		/* Initialize Transmit Descriptor Ring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 		txbdp = tx_queue->tx_bd_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 		for (j = 0; j < tx_queue->tx_ring_size; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 			txbdp->lstatus = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 			txbdp->bufPtr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 			txbdp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 		/* Set the last descriptor in the ring to indicate wrap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 		txbdp--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 		txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 					    TXBD_WRAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	rfbptr = &regs->rfbptr0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	for (i = 0; i < priv->num_rx_queues; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 		rx_queue = priv->rx_queue[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 		rx_queue->next_to_clean = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 		rx_queue->next_to_use = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 		rx_queue->next_to_alloc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 		/* make sure next_to_clean != next_to_use after this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 		 * by leaving at least 1 unused descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 		gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 		rx_queue->rfbptr = rfbptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 		rfbptr += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 
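/* Allocate one coherent DMA region holding all Tx descriptors followed
 * by all Rx descriptors, carve it up among the queues, allocate the
 * per-queue skbuff/buffer tracking arrays and initialize the rings.
 */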
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) static int gfar_alloc_skb_resources(struct net_device *ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	void *vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	dma_addr_t addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	struct gfar_private *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	struct device *dev = priv->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	struct gfar_priv_tx_q *tx_queue = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	struct gfar_priv_rx_q *rx_queue = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	priv->total_tx_ring_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	for (i = 0; i < priv->num_tx_queues; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 	priv->total_rx_ring_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	for (i = 0; i < priv->num_rx_queues; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	/* Allocate memory for the buffer descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 	vaddr = dma_alloc_coherent(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 				   (priv->total_tx_ring_size *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 				    sizeof(struct txbd8)) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 				   (priv->total_rx_ring_size *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 				    sizeof(struct rxbd8)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 				   &addr, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	if (!vaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	for (i = 0; i < priv->num_tx_queues; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 		tx_queue = priv->tx_queue[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 		tx_queue->tx_bd_base = vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 		tx_queue->tx_bd_dma_base = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 		tx_queue->dev = ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 		/* enet DMA only understands physical addresses */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 		addr  += sizeof(struct txbd8) * tx_queue->tx_ring_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	/* Start the rx descriptor ring where the tx ring leaves off */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 	for (i = 0; i < priv->num_rx_queues; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 		rx_queue = priv->rx_queue[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 		rx_queue->rx_bd_base = vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 		rx_queue->rx_bd_dma_base = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 		rx_queue->ndev = ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 		rx_queue->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 		addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 	/* Setup the skbuff rings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	for (i = 0; i < priv->num_tx_queues; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 		tx_queue = priv->tx_queue[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 		tx_queue->tx_skbuff =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 			kmalloc_array(tx_queue->tx_ring_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 				      sizeof(*tx_queue->tx_skbuff),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 				      GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 		if (!tx_queue->tx_skbuff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 			goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 		for (j = 0; j < tx_queue->tx_ring_size; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 			tx_queue->tx_skbuff[j] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	for (i = 0; i < priv->num_rx_queues; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 		rx_queue = priv->rx_queue[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 		rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 					    sizeof(*rx_queue->rx_buff),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 					    GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 		if (!rx_queue->rx_buff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 			goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	gfar_init_bds(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) cleanup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	free_skb_resources(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) /* Bring the controller up and running */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) int startup_gfar(struct net_device *ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 	struct gfar_private *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	gfar_mac_reset(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 	err = gfar_alloc_skb_resources(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	gfar_init_tx_rx_base(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	smp_mb__before_atomic();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	clear_bit(GFAR_DOWN, &priv->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	smp_mb__after_atomic();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	/* Start Rx/Tx DMA and enable the interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	gfar_start(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	/* force link state update after mac reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	priv->oldlink = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	priv->oldspeed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	priv->oldduplex = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 	phy_start(ndev->phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	enable_napi(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	netif_tx_wake_all_queues(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 
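/* Compute the MACCFG1 flow-control bits.  Half-duplex links get none;
 * otherwise honour the manual tx/rx pause settings when pause
 * autonegotiation is off, or resolve them from the local advertisement
 * and the link partner's pause capabilities.
 */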
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 	struct net_device *ndev = priv->ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	struct phy_device *phydev = ndev->phydev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	u32 val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	if (!phydev->duplex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 		return val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 	if (!priv->pause_aneg_en) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 		if (priv->tx_pause_en)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 			val |= MACCFG1_TX_FLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 		if (priv->rx_pause_en)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 			val |= MACCFG1_RX_FLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 		u16 lcl_adv, rmt_adv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 		u8 flowctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 		/* get link partner capabilities */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 		rmt_adv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 		if (phydev->pause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 			rmt_adv = LPA_PAUSE_CAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 		if (phydev->asym_pause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 			rmt_adv |= LPA_PAUSE_ASYM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 		lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 		if (flowctrl & FLOW_CTRL_TX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 			val |= MACCFG1_TX_FLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 		if (flowctrl & FLOW_CTRL_RX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 			val |= MACCFG1_RX_FLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	return val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 
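/* Bring the MAC's duplex, speed and pause configuration in line with
 * the PHY's current state.  When Tx flow control switches on, also seed
 * each Rx queue's rfbptr with its last free buffer descriptor.
 */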
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) static noinline void gfar_update_link_state(struct gfar_private *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	struct net_device *ndev = priv->ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 	struct phy_device *phydev = ndev->phydev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 	struct gfar_priv_rx_q *rx_queue = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 	if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 	if (phydev->link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 		u32 tempval1 = gfar_read(&regs->maccfg1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 		u32 tempval = gfar_read(&regs->maccfg2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 		u32 ecntrl = gfar_read(&regs->ecntrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 		u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 		if (phydev->duplex != priv->oldduplex) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 			if (!(phydev->duplex))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 				tempval &= ~(MACCFG2_FULL_DUPLEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 				tempval |= MACCFG2_FULL_DUPLEX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 			priv->oldduplex = phydev->duplex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 		if (phydev->speed != priv->oldspeed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 			switch (phydev->speed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 			case 1000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 				tempval =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 				ecntrl &= ~(ECNTRL_R100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 			case 100:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 			case 10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 				tempval =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 				/* Reduced mode distinguishes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 				 * between 10 and 100
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 				if (phydev->speed == SPEED_100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 					ecntrl |= ECNTRL_R100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 				else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 					ecntrl &= ~(ECNTRL_R100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 			default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 				netif_warn(priv, link, priv->ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 					   "Ack!  Speed (%d) is not 10/100/1000!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 					   phydev->speed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 			priv->oldspeed = phydev->speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 		tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 		tempval1 |= gfar_get_flowctrl_cfg(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 		/* Turn last free buffer recording on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 		if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
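			/* TX pause frames were just enabled: point each RX
			 * queue's free-buffer pointer register (rfbptr) at its
			 * last free BD, so the controller can judge remaining
			 * RX buffer space when deciding to send pause frames.
			 */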
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 			for (i = 0; i < priv->num_rx_queues; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 				u32 bdp_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 				rx_queue = priv->rx_queue[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 				bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 				gfar_write(rx_queue->rfbptr, bdp_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 			priv->tx_actual_en = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 		if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 			priv->tx_actual_en = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 		gfar_write(&regs->maccfg1, tempval1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 		gfar_write(&regs->maccfg2, tempval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 		gfar_write(&regs->ecntrl, ecntrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 		if (!priv->oldlink)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 			priv->oldlink = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	} else if (priv->oldlink) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 		priv->oldlink = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 		priv->oldspeed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 		priv->oldduplex = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 	if (netif_msg_link(priv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 		phy_print_status(phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) /* Called every time the controller might need to be made
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611)  * aware of new link state.  The PHY code conveys this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612)  * information through variables in the phydev structure, and this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613)  * function converts those variables into the appropriate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)  * register values, and can bring down the device if needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) static void adjust_link(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	struct gfar_private *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	struct phy_device *phydev = dev->phydev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	if (unlikely(phydev->link != priv->oldlink ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 		     (phydev->link && (phydev->duplex != priv->oldduplex ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 				       phydev->speed != priv->oldspeed))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 		gfar_update_link_state(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) /* Initialize TBI PHY interface for communicating with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)  * SERDES lynx PHY on the chip.  We communicate with this PHY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629)  * through the MDIO bus on each controller, treating it as a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630)  * "normal" PHY at the address found in the TBIPA register.  We assume
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)  * that the TBIPA register is valid.  Either the MDIO bus code will set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632)  * it to a value that doesn't conflict with other PHYs on the bus, or the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633)  * value doesn't matter, as there are no other PHYs on the bus.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) static void gfar_configure_serdes(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 	struct gfar_private *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 	struct phy_device *tbiphy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	if (!priv->tbi_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 		dev_warn(&dev->dev, "error: SGMII mode requires that the "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 				    "device tree specify a tbi-handle\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 	tbiphy = of_phy_find_device(priv->tbi_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 	if (!tbiphy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 		dev_err(&dev->dev, "error: Could not get TBI device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 	/* If the link is already up, we must already be ok, and don't need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 	 * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 	 * everything for us?  Resetting it takes the link down and requires
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 	 * several seconds for it to come back.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 		put_device(&tbiphy->mdio.dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	/* Single clk mode, MII mode off (for SerDes communication) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 	phy_write(tbiphy, MII_ADVERTISE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 		  ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 		  ADVERTISE_1000XPSE_ASYM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 	phy_write(tbiphy, MII_BMCR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 		  BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 		  BMCR_SPEED1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 	put_device(&tbiphy->mdio.dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) /* Initializes driver's PHY state, and attaches to the PHY.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677)  * Returns 0 on success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) static int init_phy(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 	struct gfar_private *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 	phy_interface_t interface = priv->interface;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 	struct phy_device *phydev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 	struct ethtool_eee edata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 	linkmode_set_bit_array(phy_10_100_features_array,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 			       ARRAY_SIZE(phy_10_100_features_array),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 			       mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 	linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 	priv->oldlink = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 	priv->oldspeed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 	priv->oldduplex = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 	phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 				interface);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 	if (!phydev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 		dev_err(&dev->dev, "could not attach to PHY\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 	if (interface == PHY_INTERFACE_MODE_SGMII)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 		gfar_configure_serdes(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 	/* Remove any features not supported by the controller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 	linkmode_and(phydev->supported, phydev->supported, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 	linkmode_copy(phydev->advertising, phydev->supported);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 	/* Add support for flow control */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 	phy_support_asym_pause(phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 	/* disable EEE autoneg, EEE not supported by eTSEC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 	memset(&edata, 0, sizeof(struct ethtool_eee));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	phy_ethtool_set_eee(phydev, &edata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 	struct txfcb *fcb = skb_push(skb, GMAC_FCB_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 	memset(fcb, 0, GMAC_FCB_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 	return fcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 				    int fcb_length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 	/* If we're here, it's an IP packet with a TCP or UDP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 	 * payload.  We set it up for checksumming, using a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 	 * pseudo-header we provide.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 	u8 flags = TXFCB_DEFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 	/* Tell the controller what the protocol is, and provide
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 	 * the already calculated pseudo-header checksum (phcs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 		flags |= TXFCB_UDP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 		fcb->phcs = (__force __be16)(udp_hdr(skb)->check);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 		fcb->phcs = (__force __be16)(tcp_hdr(skb)->check);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 	/* l3os is the distance between the start of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 	 * frame (skb->data) and the start of the IP hdr.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 	 * l4os is the distance between the start of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 	 * l3 hdr and the l4 hdr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 	fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 	fcb->l4os = skb_network_header_len(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 	fcb->flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 	fcb->flags |= TXFCB_VLN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 	fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 				      struct txbd8 *base, int ring_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 	struct txbd8 *new_bd = bdp + stride;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 				      int ring_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 	return skip_txbd(bdp, 1, base, ring_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) /* eTSEC12: csum generation not supported for some fcb offsets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) static inline bool gfar_csum_errata_12(struct gfar_private *priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 				       unsigned long fcb_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 	return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 	       (fcb_addr % 0x20) > 0x18);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) /* eTSEC76: csum generation for frames larger than 2500 may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790)  * cause excess delays before start of transmission
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) static inline bool gfar_csum_errata_76(struct gfar_private *priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 				       unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 	return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 	       (len > 2500));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) /* This is called by the kernel when a frame is ready for transmission.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800)  * It is pointed to by the dev->hard_start_xmit function pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 	struct gfar_private *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 	struct gfar_priv_tx_q *tx_queue = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 	struct netdev_queue *txq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 	struct gfar __iomem *regs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 	struct txfcb *fcb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 	struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 	u32 lstatus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 	skb_frag_t *frag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 	int i, rq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 	int do_tstamp, do_csum, do_vlan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 	u32 bufaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 	unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 	rq = skb->queue_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 	tx_queue = priv->tx_queue[rq];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 	txq = netdev_get_tx_queue(dev, rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 	base = tx_queue->tx_bd_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 	regs = tx_queue->grp->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 	do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 	do_vlan = skb_vlan_tag_present(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 	do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 		    priv->hwts_tx_en;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 	if (do_csum || do_vlan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 		fcb_len = GMAC_FCB_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 	/* check if time stamp should be generated */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 	if (unlikely(do_tstamp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 		fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 	/* make space for additional header when fcb is needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 	if (fcb_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 		if (unlikely(skb_cow_head(skb, fcb_len))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 			dev->stats.tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 			dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 			return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 	/* total number of fragments in the SKB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 	nr_frags = skb_shinfo(skb)->nr_frags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 	/* calculate the required number of TxBDs for this skb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 	if (unlikely(do_tstamp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 		nr_txbds = nr_frags + 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 		nr_txbds = nr_frags + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 	/* check if there is space to queue this packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 	if (nr_txbds > tx_queue->num_txbdfree) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 		/* no space, stop the queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 		netif_tx_stop_queue(txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 		dev->stats.tx_fifo_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 		return NETDEV_TX_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 	/* Update transmit stats */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 	bytes_sent = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 	tx_queue->stats.tx_bytes += bytes_sent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 	/* keep Tx bytes on wire for BQL accounting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 	GFAR_CB(skb)->bytes_sent = bytes_sent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 	tx_queue->stats.tx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 	txbdp = txbdp_start = tx_queue->cur_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 	lstatus = be32_to_cpu(txbdp->lstatus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 	/* Add TxPAL between FCB and frame if required */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 	if (unlikely(do_tstamp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 		skb_push(skb, GMAC_TXPAL_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 		memset(skb->data, 0, GMAC_TXPAL_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 	/* Add TxFCB if required */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 	if (fcb_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 		fcb = gfar_add_fcb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 		lstatus |= BD_LFLAG(TXBD_TOE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 	/* Set up checksumming */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 	if (do_csum) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 		gfar_tx_checksum(skb, fcb, fcb_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 		if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 		    unlikely(gfar_csum_errata_76(priv, skb->len))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 			__skb_pull(skb, GMAC_FCB_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 			skb_checksum_help(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 			if (do_vlan || do_tstamp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 				/* put back a new fcb for vlan/tstamp TOE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 				fcb = gfar_add_fcb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 				/* Tx TOE not used */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 				lstatus &= ~(BD_LFLAG(TXBD_TOE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 				fcb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 	if (do_vlan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 		gfar_tx_vlan(skb, fcb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 	bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 				 DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 	if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 		goto dma_map_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 	txbdp_start->bufPtr = cpu_to_be32(bufaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 	/* Time stamp insertion requires one additional TxBD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 	if (unlikely(do_tstamp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 		txbdp_tstamp = txbdp = next_txbd(txbdp, base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 						 tx_queue->tx_ring_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 	if (likely(!nr_frags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 		if (likely(!do_tstamp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 	} else {
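		/* Save the first BD's lstatus; TXBD_READY on the first
		 * descriptor is only set further below, after the fragment
		 * BDs have been fully set up and a write barrier issued.
		 */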
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 		u32 lstatus_start = lstatus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 		/* Place the fragment addresses and lengths into the TxBDs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 		frag = &skb_shinfo(skb)->frags[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 		for (i = 0; i < nr_frags; i++, frag++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 			unsigned int size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 			/* Point at the next BD, wrapping as needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 			size = skb_frag_size(frag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 			lstatus = be32_to_cpu(txbdp->lstatus) | size |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 				  BD_LFLAG(TXBD_READY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 			/* Handle the last BD specially */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 			if (i == nr_frags - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 			bufaddr = skb_frag_dma_map(priv->dev, frag, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 						   size, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 			if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 				goto dma_map_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 			/* set the TxBD length and buffer pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 			txbdp->bufPtr = cpu_to_be32(bufaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 			txbdp->lstatus = cpu_to_be32(lstatus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 		lstatus = lstatus_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 	/* If time stamping is requested one additional TxBD must be set up. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 	 * first TxBD points to the FCB and must have a data length of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 	 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 	 * the full frame length.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 	if (unlikely(do_tstamp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 		u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 		bufaddr = be32_to_cpu(txbdp_start->bufPtr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 		bufaddr += fcb_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 		lstatus_ts |= BD_LFLAG(TXBD_READY) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 			      (skb_headlen(skb) - fcb_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 		if (!nr_frags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 			lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 		txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 		txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 		/* Setup tx hardware time stamping */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 		fcb->ptp = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 	netdev_tx_sent_queue(txq, bytes_sent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 	gfar_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 	txbdp_start->lstatus = cpu_to_be32(lstatus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 	gfar_wmb(); /* force lstatus write before tx_skbuff */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 	/* Update the current skb pointer to the next entry we will use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 	 * (wrapping if necessary)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 			      TX_RING_MOD_MASK(tx_queue->tx_ring_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 	/* We can work in parallel with gfar_clean_tx_ring(), except
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 	 * when modifying num_txbdfree. Note that we didn't grab the lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 	 * when we were reading num_txbdfree and checking for available
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 	 * space; that's because outside of this function it can only grow.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 	spin_lock_bh(&tx_queue->txlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 	/* reduce TxBD free count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 	tx_queue->num_txbdfree -= (nr_txbds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 	spin_unlock_bh(&tx_queue->txlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 	/* If the next BD still needs to be cleaned up, then the bds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 	 * are full.  We need to tell the kernel to stop sending us stuff.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 	if (!tx_queue->num_txbdfree) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 		netif_tx_stop_queue(txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 		dev->stats.tx_fifo_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 	/* Tell the DMA to go: clear this queue's transmit halt bit in TSTAT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 	return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) dma_map_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 	txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 	if (do_tstamp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 		txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 	for (i = 0; i < nr_frags; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 		lstatus = be32_to_cpu(txbdp->lstatus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 		if (!(lstatus & BD_LFLAG(TXBD_READY)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 		lstatus &= ~BD_LFLAG(TXBD_READY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 		txbdp->lstatus = cpu_to_be32(lstatus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 		bufaddr = be32_to_cpu(txbdp->bufPtr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 		dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 			       DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 		txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 	gfar_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 	dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 	return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) /* Changes the mac address if the controller is not running. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) static int gfar_set_mac_address(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) static int gfar_change_mtu(struct net_device *dev, int new_mtu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 	struct gfar_private *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 		cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 	if (dev->flags & IFF_UP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 		stop_gfar(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 	dev->mtu = new_mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 	if (dev->flags & IFF_UP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 		startup_gfar(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 	clear_bit_unlock(GFAR_RESETTING, &priv->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) static void reset_gfar(struct net_device *ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 	struct gfar_private *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 		cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 	stop_gfar(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 	startup_gfar(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 	clear_bit_unlock(GFAR_RESETTING, &priv->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) /* gfar_reset_task gets scheduled when a packet has not been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085)  * transmitted after a set amount of time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086)  * For now, assume that clearing out all the structures and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087)  * starting over will fix the problem.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) static void gfar_reset_task(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 	struct gfar_private *priv = container_of(work, struct gfar_private,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 						 reset_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 	reset_gfar(priv->ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) static void gfar_timeout(struct net_device *dev, unsigned int txqueue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 	struct gfar_private *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 	dev->stats.tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 	schedule_work(&priv->reset_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 	struct hwtstamp_config config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 	struct gfar_private *priv = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 	/* reserved for future extensions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 	if (config.flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 	switch (config.tx_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 	case HWTSTAMP_TX_OFF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 		priv->hwts_tx_en = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 	case HWTSTAMP_TX_ON:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 			return -ERANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 		priv->hwts_tx_en = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 		return -ERANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 	switch (config.rx_filter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 	case HWTSTAMP_FILTER_NONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 		if (priv->hwts_rx_en) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 			priv->hwts_rx_en = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 			reset_gfar(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 			return -ERANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 		if (!priv->hwts_rx_en) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 			priv->hwts_rx_en = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 			reset_gfar(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 		config.rx_filter = HWTSTAMP_FILTER_ALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 		-EFAULT : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 	struct hwtstamp_config config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 	struct gfar_private *priv = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 	config.flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 	config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 	config.rx_filter = (priv->hwts_rx_en ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 		-EFAULT : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 	struct phy_device *phydev = dev->phydev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 	if (!netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 	if (cmd == SIOCSHWTSTAMP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 		return gfar_hwtstamp_set(dev, rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 	if (cmd == SIOCGHWTSTAMP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 		return gfar_hwtstamp_get(dev, rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 	if (!phydev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 	return phy_mii_ioctl(phydev, rq, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) /* Reclaim TX buffer descriptors that the controller has finished with */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 	struct net_device *dev = tx_queue->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 	struct netdev_queue *txq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 	struct gfar_private *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 	struct txbd8 *bdp, *next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 	struct txbd8 *lbdp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 	struct txbd8 *base = tx_queue->tx_bd_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 	int skb_dirtytx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 	int tx_ring_size = tx_queue->tx_ring_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 	int frags = 0, nr_txbds = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 	int howmany = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 	int tqi = tx_queue->qindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 	unsigned int bytes_sent = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 	u32 lstatus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 	size_t buflen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 	txq = netdev_get_tx_queue(dev, tqi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 	bdp = tx_queue->dirty_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 	skb_dirtytx = tx_queue->skb_dirtytx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 		bool do_tstamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 		do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 			    priv->hwts_tx_en;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 		frags = skb_shinfo(skb)->nr_frags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 		/* When time stamping, one additional TxBD must be freed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 		 * Also, we need to dma_unmap_single() the TxPAL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 		if (unlikely(do_tstamp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 			nr_txbds = frags + 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 			nr_txbds = frags + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 
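		/* lbdp is the last BD of this frame; stop cleaning as soon as
		 * we hit a frame the controller has not finished sending.
		 */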
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 		lstatus = be32_to_cpu(lbdp->lstatus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 		/* Only clean completed frames */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 		    (lstatus & BD_LENGTH_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 		if (unlikely(do_tstamp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 			next = next_txbd(bdp, base, tx_ring_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 			buflen = be16_to_cpu(next->length) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 				 GMAC_FCB_LEN + GMAC_TXPAL_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 			buflen = be16_to_cpu(bdp->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 		dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 				 buflen, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 		if (unlikely(do_tstamp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 			struct skb_shared_hwtstamps shhwtstamps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 			u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 					  ~0x7UL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 
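			/* The controller returns the TX timestamp in the
			 * padding (TxPAL) area that gfar_start_xmit() prepended
			 * to the frame; pick up the 8-byte-aligned 64-bit
			 * nanosecond value from it.
			 */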
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 			shhwtstamps.hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 			skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 			skb_tstamp_tx(skb, &shhwtstamps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 			gfar_clear_txbd_status(bdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 			bdp = next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 		gfar_clear_txbd_status(bdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 		bdp = next_txbd(bdp, base, tx_ring_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 		for (i = 0; i < frags; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 			dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 				       be16_to_cpu(bdp->length),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 				       DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 			gfar_clear_txbd_status(bdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 			bdp = next_txbd(bdp, base, tx_ring_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 		bytes_sent += GFAR_CB(skb)->bytes_sent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 		dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 		tx_queue->tx_skbuff[skb_dirtytx] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 		skb_dirtytx = (skb_dirtytx + 1) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 			      TX_RING_MOD_MASK(tx_ring_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 		howmany++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 		spin_lock(&tx_queue->txlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 		tx_queue->num_txbdfree += nr_txbds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 		spin_unlock(&tx_queue->txlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 	/* If we freed a buffer, we can restart transmission, if necessary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 	if (tx_queue->num_txbdfree &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 	    netif_tx_queue_stopped(txq) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 	    !(test_bit(GFAR_DOWN, &priv->state)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 		netif_wake_subqueue(priv->ndev, tqi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 	/* Update dirty indicators */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 	tx_queue->skb_dirtytx = skb_dirtytx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 	tx_queue->dirty_tx = bdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 
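	/* Report the reclaimed frames/bytes to BQL; this pairs with the
	 * netdev_tx_sent_queue() call in gfar_start_xmit().
	 */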
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 	netdev_tx_completed_queue(txq, howmany, bytes_sent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) static void count_errors(u32 lstatus, struct net_device *ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 	struct gfar_private *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 	struct net_device_stats *stats = &ndev->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 	struct gfar_extra_stats *estats = &priv->extra_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 	/* If the packet was truncated, none of the other errors matter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 	if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 		stats->rx_length_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 		atomic64_inc(&estats->rx_trunc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 	/* Count the errors, if there were any */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 	if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 		stats->rx_length_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 		if (lstatus & BD_LFLAG(RXBD_LARGE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 			atomic64_inc(&estats->rx_large);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 			atomic64_inc(&estats->rx_short);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 	if (lstatus & BD_LFLAG(RXBD_NONOCTET)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 		stats->rx_frame_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 		atomic64_inc(&estats->rx_nonoctet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 	if (lstatus & BD_LFLAG(RXBD_CRCERR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 		atomic64_inc(&estats->rx_crcerr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 		stats->rx_crc_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 	if (lstatus & BD_LFLAG(RXBD_OVERRUN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 		atomic64_inc(&estats->rx_overrun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 		stats->rx_over_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) static irqreturn_t gfar_receive(int irq, void *grp_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 	u32 imask, ievent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 	ievent = gfar_read(&grp->regs->ievent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 	if (unlikely(ievent & IEVENT_FGPI)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 		gfar_write(&grp->regs->ievent, IEVENT_FGPI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 		return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 	if (likely(napi_schedule_prep(&grp->napi_rx))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 		spin_lock_irqsave(&grp->grplock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 		imask = gfar_read(&grp->regs->imask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 		imask &= IMASK_RX_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 		gfar_write(&grp->regs->imask, imask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 		spin_unlock_irqrestore(&grp->grplock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 		__napi_schedule(&grp->napi_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 		/* Clear IEVENT, so interrupts aren't called again
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 		 * because of the packets that have already arrived.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 		gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) /* Interrupt Handler for Transmit complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) static irqreturn_t gfar_transmit(int irq, void *grp_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 	u32 imask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 	if (likely(napi_schedule_prep(&grp->napi_tx))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 		spin_lock_irqsave(&grp->grplock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 		imask = gfar_read(&grp->regs->imask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 		imask &= IMASK_TX_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 		gfar_write(&grp->regs->imask, imask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 		spin_unlock_irqrestore(&grp->grplock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 		__napi_schedule(&grp->napi_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 		/* Clear IEVENT, so interrupts aren't called again
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 		 * because of the packets that have already arrived.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 		gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 
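/* Merge one RX buffer into the skb being assembled.  RX buffers are
 * half-page slots (GFAR_RXB_TRUESIZE bytes): the first buffer forms the
 * linear part of the skb, later buffers are attached as page fragments.
 * Returns true when the unused half of the page may be recycled by the
 * driver, false when the page has to be unmapped instead.
 */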
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 			     struct sk_buff *skb, bool first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 	int size = lstatus & BD_LENGTH_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 	struct page *page = rxb->page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 	if (likely(first)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 		skb_put(skb, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 		/* the last fragment's length contains the full frame length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) 		if (lstatus & BD_LFLAG(RXBD_LAST))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) 			size -= skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 		WARN(size < 0, "gianfar: rx fragment size underflow");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 		if (size < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 				rxb->page_offset + RXBUF_ALIGNMENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 				size, GFAR_RXB_TRUESIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 	/* try to reuse the page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 	if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 	/* change offset to the other half */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 	rxb->page_offset ^= GFAR_RXB_TRUESIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 	page_ref_inc(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 			       struct gfar_rx_buff *old_rxb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 	struct gfar_rx_buff *new_rxb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 	u16 nta = rxq->next_to_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 	new_rxb = &rxq->rx_buff[nta];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 	/* find next buf that can reuse a page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 	nta++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 	rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 	/* copy page reference */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 	*new_rxb = *old_rxb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 	/* sync for use by the device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 	dma_sync_single_range_for_device(rxq->dev, old_rxb->dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 					 old_rxb->page_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 					 GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 
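/* Consume the next-to-clean RX buffer for the frame currently being
 * received.  When skb is NULL this is the first buffer of a frame and a
 * new skb is built around it; otherwise the buffer is appended as a
 * fragment.  The buffer's page is recycled or unmapped as appropriate
 * and the ring slot is cleared.
 */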
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 					    u32 lstatus, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 	struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 	struct page *page = rxb->page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 	bool first = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 	if (likely(!skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 		void *buff_addr = page_address(page) + rxb->page_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 		skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 		if (unlikely(!skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 			gfar_rx_alloc_err(rx_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 		skb_reserve(skb, RXBUF_ALIGNMENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 		first = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 	dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 				      GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 	if (gfar_add_rx_frag(rxb, lstatus, skb, first)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 		/* reuse the free half of the page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 		gfar_reuse_rx_page(rx_queue, rxb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 		/* page cannot be reused, unmap it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 		dma_unmap_page(rx_queue->dev, rxb->dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 			       PAGE_SIZE, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 	/* clear rxb content */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 	rxb->page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 	return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 	/* If valid headers were found, and valid sums
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 	 * were verified, then we tell the kernel that no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) 	 * checksumming is necessary.  Otherwise, the stack must verify it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 	if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 	    (RXFCB_CIP | RXFCB_CTU))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 		skb->ip_summed = CHECKSUM_UNNECESSARY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 		skb_checksum_none_assert(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 	struct gfar_private *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 	struct rxfcb *fcb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 	/* the FCB, if present, is at the beginning of the frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 	fcb = (struct rxfcb *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) 	/* Remove the FCB from the skb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 	 * Remove the padded bytes, if there are any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) 	if (priv->uses_rxfcb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) 		skb_pull(skb, GMAC_FCB_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 	/* Get receive timestamp from the skb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 	if (priv->hwts_rx_en) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) 		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 		u64 *ns = (u64 *) skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 		shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 	if (priv->padding)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 		skb_pull(skb, priv->padding);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 	/* Trim off the FCS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 	pskb_trim(skb, skb->len - ETH_FCS_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 	if (ndev->features & NETIF_F_RXCSUM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 		gfar_rx_checksum(skb, fcb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 	/* We need to check for NETIF_F_HW_VLAN_CTAG_RX here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) 	 * Even if vlan rx acceleration is disabled, on some chips
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 	 * RXFCB_VLN is pseudo-randomly set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) 	if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) 	    be16_to_cpu(fcb->flags) & RXFCB_VLN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 				       be16_to_cpu(fcb->vlctl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) /* gfar_clean_rx_ring() -- Processes each frame in the rx ring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534)  * until the budget/quota has been reached. Returns the number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535)  * of frames handled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) static int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) 			      int rx_work_limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) 	struct net_device *ndev = rx_queue->ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 	struct gfar_private *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) 	struct rxbd8 *bdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 	int i, howmany = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) 	struct sk_buff *skb = rx_queue->skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) 	int cleaned_cnt = gfar_rxbd_unused(rx_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) 	unsigned int total_bytes = 0, total_pkts = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) 	/* Get the first full descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 	i = rx_queue->next_to_clean;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 	while (rx_work_limit--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 		u32 lstatus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 		if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 			gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 			cleaned_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 		bdp = &rx_queue->rx_bd_base[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) 		lstatus = be32_to_cpu(bdp->lstatus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) 		if (lstatus & BD_LFLAG(RXBD_EMPTY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 		/* lost RXBD_LAST descriptor due to overrun */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 		if (skb &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 		    (lstatus & BD_LFLAG(RXBD_FIRST))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 			/* discard faulty buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 			dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 			skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) 			rx_queue->stats.rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 			/* can continue normally */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 		/* order rx buffer descriptor reads */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 		rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 		/* fetch the next-to-clean buffer from the ring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) 		skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 		if (unlikely(!skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) 		cleaned_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) 		howmany++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) 		if (unlikely(++i == rx_queue->rx_ring_size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) 			i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 		rx_queue->next_to_clean = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) 		/* fetch next buffer if not the last in frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) 		if (!(lstatus & BD_LFLAG(RXBD_LAST)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) 		if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) 			count_errors(lstatus, ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) 			/* discard faulty buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 			dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 			skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) 			rx_queue->stats.rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 		gfar_process_frame(ndev, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 		/* Increment the number of packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 		total_pkts++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 		total_bytes += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 		skb_record_rx_queue(skb, rx_queue->qindex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 		skb->protocol = eth_type_trans(skb, ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 		/* Send the packet up the stack */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 		napi_gro_receive(&rx_queue->grp->napi_rx, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 		skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 	/* Store incomplete frames for completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 	rx_queue->skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) 	rx_queue->stats.rx_packets += total_pkts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) 	rx_queue->stats.rx_bytes += total_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) 	if (cleaned_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 		gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 	/* Update the Last Free RxBD pointer for lossless flow control (LFC) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) 	if (unlikely(priv->tx_actual_en)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) 		u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) 		gfar_write(rx_queue->rfbptr, bdp_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 	return howmany;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 
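/* NAPI RX poll for the single-queue case (one RX queue per interrupt
 * group).  Processes up to budget frames; if the budget is not
 * exhausted the poll is completed, the halt bit in RSTAT is cleared and
 * RX interrupts are unmasked again under the group lock.
 */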
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 	struct gfar_priv_grp *gfargrp =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 		container_of(napi, struct gfar_priv_grp, napi_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 	struct gfar __iomem *regs = gfargrp->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 	struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) 	int work_done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) 	/* Clear IEVENT, so interrupts aren't called again
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) 	 * because of the packets that have already arrived
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) 	gfar_write(&regs->ievent, IEVENT_RX_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) 	work_done = gfar_clean_rx_ring(rx_queue, budget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) 	if (work_done < budget) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) 		u32 imask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) 		napi_complete_done(napi, work_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) 		/* Clear the halt bit in RSTAT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) 		gfar_write(&regs->rstat, gfargrp->rstat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) 		spin_lock_irq(&gfargrp->grplock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) 		imask = gfar_read(&regs->imask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) 		imask |= IMASK_RX_DEFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) 		gfar_write(&regs->imask, imask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) 		spin_unlock_irq(&gfargrp->grplock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) 	return work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) 
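/* NAPI TX poll for the single-queue case.  Tx cleanup always runs to
 * completion, so the poll completes unconditionally, re-enables TX
 * interrupts and returns 0 to stay within any budget.
 */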
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) 	struct gfar_priv_grp *gfargrp =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 		container_of(napi, struct gfar_priv_grp, napi_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 	struct gfar __iomem *regs = gfargrp->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 	struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 	u32 imask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 	/* Clear IEVENT, so interrupts aren't called again
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) 	 * because of the packets that have already arrived
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 	gfar_write(&regs->ievent, IEVENT_TX_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 	/* run Tx cleanup to completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) 	if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) 		gfar_clean_tx_ring(tx_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) 	napi_complete(napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) 	spin_lock_irq(&gfargrp->grplock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) 	imask = gfar_read(&regs->imask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 	imask |= IMASK_TX_DEFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 	gfar_write(&regs->imask, imask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 	spin_unlock_irq(&gfargrp->grplock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) 
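/* NAPI RX poll for groups that own several RX queues.  The budget is
 * split evenly between the queues RSTAT marks as active; RX interrupts
 * are only re-enabled once every active queue has been drained below
 * its share of the budget.
 */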
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) static int gfar_poll_rx(struct napi_struct *napi, int budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) 	struct gfar_priv_grp *gfargrp =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) 		container_of(napi, struct gfar_priv_grp, napi_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 	struct gfar_private *priv = gfargrp->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) 	struct gfar __iomem *regs = gfargrp->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) 	struct gfar_priv_rx_q *rx_queue = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 	int work_done = 0, work_done_per_q = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) 	int i, budget_per_q = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) 	unsigned long rstat_rxf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) 	int num_act_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 	/* Clear IEVENT, so interrupts aren't called again
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) 	 * because of the packets that have already arrived
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 	gfar_write(&regs->ievent, IEVENT_RX_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) 	rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 	num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) 	if (num_act_queues)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) 		budget_per_q = budget/num_act_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) 	for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) 		/* skip queue if not active */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) 		if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) 		rx_queue = priv->rx_queue[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) 		work_done_per_q =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) 			gfar_clean_rx_ring(rx_queue, budget_per_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) 		work_done += work_done_per_q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) 		/* finished processing this queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) 		if (work_done_per_q < budget_per_q) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) 			/* clear active queue hw indication */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) 			gfar_write(&regs->rstat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) 				   RSTAT_CLEAR_RXF0 >> i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) 			num_act_queues--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) 			if (!num_act_queues)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) 	if (!num_act_queues) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) 		u32 imask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) 		napi_complete_done(napi, work_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) 		/* Clear the halt bit in RSTAT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) 		gfar_write(&regs->rstat, gfargrp->rstat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) 		spin_lock_irq(&gfargrp->grplock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) 		imask = gfar_read(&regs->imask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) 		imask |= IMASK_RX_DEFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) 		gfar_write(&regs->imask, imask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) 		spin_unlock_irq(&gfargrp->grplock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) 	return work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) 
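/* NAPI TX poll for groups that own several TX queues: clean every queue
 * with outstanding completions and re-enable TX interrupts only on a
 * round where none of them had pending work.
 */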
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) static int gfar_poll_tx(struct napi_struct *napi, int budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) 	struct gfar_priv_grp *gfargrp =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) 		container_of(napi, struct gfar_priv_grp, napi_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) 	struct gfar_private *priv = gfargrp->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) 	struct gfar __iomem *regs = gfargrp->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) 	struct gfar_priv_tx_q *tx_queue = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) 	int has_tx_work = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) 	/* Clear IEVENT, so interrupts aren't called again
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) 	 * because of the packets that have already arrived
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) 	gfar_write(&regs->ievent, IEVENT_TX_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) 	for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) 		tx_queue = priv->tx_queue[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) 		/* run Tx cleanup to completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) 		if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) 			gfar_clean_tx_ring(tx_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) 			has_tx_work = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) 	if (!has_tx_work) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) 		u32 imask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) 		napi_complete(napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) 		spin_lock_irq(&gfargrp->grplock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) 		imask = gfar_read(&regs->imask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) 		imask |= IMASK_TX_DEFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) 		gfar_write(&regs->imask, imask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) 		spin_unlock_irq(&gfargrp->grplock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) /* GFAR error interrupt handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) static irqreturn_t gfar_error(int irq, void *grp_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) 	struct gfar_priv_grp *gfargrp = grp_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) 	struct gfar __iomem *regs = gfargrp->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) 	struct gfar_private *priv = gfargrp->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) 	struct net_device *dev = priv->ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) 	/* Save ievent for future reference */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) 	u32 events = gfar_read(&regs->ievent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) 	/* Clear IEVENT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) 	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) 	/* Magic Packet is not an error. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) 	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) 	    (events & IEVENT_MAG))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) 		events &= ~IEVENT_MAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) 	/* Report the error if rx/tx error debug messaging is enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) 	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) 		netdev_dbg(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) 			   "error interrupt (ievent=0x%08x imask=0x%08x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) 			   events, gfar_read(&regs->imask));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) 	/* Update the error counters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) 	if (events & IEVENT_TXE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) 		dev->stats.tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) 		if (events & IEVENT_LC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) 			dev->stats.tx_window_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) 		if (events & IEVENT_CRL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) 			dev->stats.tx_aborted_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) 		if (events & IEVENT_XFUN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) 			netif_dbg(priv, tx_err, dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) 				  "TX FIFO underrun, packet dropped\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) 			dev->stats.tx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) 			atomic64_inc(&priv->extra_stats.tx_underrun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) 			schedule_work(&priv->reset_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) 		netif_dbg(priv, tx_err, dev, "Transmit Error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) 	if (events & IEVENT_BSY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) 		dev->stats.rx_over_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) 		atomic64_inc(&priv->extra_stats.rx_bsy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) 		netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) 			  gfar_read(&regs->rstat));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) 	if (events & IEVENT_BABR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) 		dev->stats.rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) 		atomic64_inc(&priv->extra_stats.rx_babr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) 		netif_dbg(priv, rx_err, dev, "babbling RX error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) 	if (events & IEVENT_EBERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) 		atomic64_inc(&priv->extra_stats.eberr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) 		netif_dbg(priv, rx_err, dev, "bus error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) 	if (events & IEVENT_RXC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) 		netif_dbg(priv, rx_status, dev, "control frame\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) 	if (events & IEVENT_BABT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) 		atomic64_inc(&priv->extra_stats.tx_babt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) 		netif_dbg(priv, tx_err, dev, "babbling TX error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) /* The interrupt handler for devices with one interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) static irqreturn_t gfar_interrupt(int irq, void *grp_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) 	struct gfar_priv_grp *gfargrp = grp_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) 	/* Save ievent for future reference */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) 	u32 events = gfar_read(&gfargrp->regs->ievent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) 	/* Check for reception */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) 	if (events & IEVENT_RX_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) 		gfar_receive(irq, grp_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) 	/* Check for transmit completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) 	if (events & IEVENT_TX_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) 		gfar_transmit(irq, grp_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) 	/* Check for errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) 	if (events & IEVENT_ERR_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) 		gfar_error(irq, grp_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) #ifdef CONFIG_NET_POLL_CONTROLLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) /* Polling 'interrupt' - used by things like netconsole to send skbs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894)  * without having to re-enable interrupts. It's not called while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895)  * the interrupt routine is executing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) static void gfar_netpoll(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) 	struct gfar_private *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) 	/* If the device has multiple interrupts, run tx/rx */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) 		for (i = 0; i < priv->num_grps; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) 			struct gfar_priv_grp *grp = &priv->gfargrp[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) 			disable_irq(gfar_irq(grp, TX)->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) 			disable_irq(gfar_irq(grp, RX)->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) 			disable_irq(gfar_irq(grp, ER)->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) 			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) 			enable_irq(gfar_irq(grp, ER)->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) 			enable_irq(gfar_irq(grp, RX)->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) 			enable_irq(gfar_irq(grp, TX)->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) 		for (i = 0; i < priv->num_grps; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) 			struct gfar_priv_grp *grp = &priv->gfargrp[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) 			disable_irq(gfar_irq(grp, TX)->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) 			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) 			enable_irq(gfar_irq(grp, TX)->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) static void free_grp_irqs(struct gfar_priv_grp *grp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) 	free_irq(gfar_irq(grp, TX)->irq, grp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) 	free_irq(gfar_irq(grp, RX)->irq, grp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) 	free_irq(gfar_irq(grp, ER)->irq, grp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) 
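/* Request the interrupt lines for one interrupt group.  With separate
 * interrupts the Error, Transmit and Receive lines each get their own
 * handler and the error/receive lines are marked as wakeup sources; on
 * failure the lines already requested are released through the goto
 * labels.  Single-interrupt devices use gfar_interrupt() on the TX line
 * for everything.
 */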
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) static int register_grp_irqs(struct gfar_priv_grp *grp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) 	struct gfar_private *priv = grp->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) 	struct net_device *dev = priv->ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) 	/* If the device has multiple interrupts, register for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) 	 * them.  Otherwise, only register for the one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) 		/* Install our interrupt handlers for Error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) 		 * Transmit, and Receive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) 		err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) 				  gfar_irq(grp, ER)->name, grp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) 		if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) 				  gfar_irq(grp, ER)->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) 			goto err_irq_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) 		enable_irq_wake(gfar_irq(grp, ER)->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) 		err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) 				  gfar_irq(grp, TX)->name, grp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) 		if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) 				  gfar_irq(grp, TX)->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) 			goto tx_irq_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) 		err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) 				  gfar_irq(grp, RX)->name, grp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) 		if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) 				  gfar_irq(grp, RX)->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) 			goto rx_irq_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) 		enable_irq_wake(gfar_irq(grp, RX)->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) 		err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) 				  gfar_irq(grp, TX)->name, grp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) 		if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) 			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) 				  gfar_irq(grp, TX)->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) 			goto err_irq_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) 		enable_irq_wake(gfar_irq(grp, TX)->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) rx_irq_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) 	free_irq(gfar_irq(grp, TX)->irq, grp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) tx_irq_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) 	free_irq(gfar_irq(grp, ER)->irq, grp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) err_irq_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) static void gfar_free_irq(struct gfar_private *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) 	/* Free the IRQs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) 		for (i = 0; i < priv->num_grps; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) 			free_grp_irqs(&priv->gfargrp[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) 		for (i = 0; i < priv->num_grps; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) 			free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) 				 &priv->gfargrp[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) static int gfar_request_irq(struct gfar_private *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) 	int err, i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) 	for (i = 0; i < priv->num_grps; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) 		err = register_grp_irqs(&priv->gfargrp[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) 			for (j = 0; j < i; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) 				free_grp_irqs(&priv->gfargrp[j]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) /* Called when something needs to use the ethernet device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027)  * Returns 0 for success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) static int gfar_enet_open(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) 	struct gfar_private *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) 	err = init_phy(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) 	err = gfar_request_irq(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) 	err = startup_gfar(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) /* Stops the kernel queue, and halts the controller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) static int gfar_close(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) 	struct gfar_private *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) 	cancel_work_sync(&priv->reset_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) 	stop_gfar(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) 	/* Disconnect from the PHY */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) 	phy_disconnect(dev->phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) 	gfar_free_irq(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) /* Clears each of the exact match registers to zero, so they
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066)  * don't interfere with normal reception
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) static void gfar_clear_exact_match(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) 	int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) 	static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) 	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) 		gfar_set_mac_for_addr(dev, idx, zero_arr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) /* Update the hash table based on the current list of multicast
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078)  * addresses we subscribe to.  Also, change the promiscuity of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079)  * the device based on the flags (this function is called
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080)  * whenever dev->flags is changed).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) static void gfar_set_multi(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) 	struct netdev_hw_addr *ha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) 	struct gfar_private *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) 	u32 tempval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) 	if (dev->flags & IFF_PROMISC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) 		/* Set RCTRL to PROM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) 		tempval = gfar_read(&regs->rctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) 		tempval |= RCTRL_PROM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) 		gfar_write(&regs->rctrl, tempval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) 		/* Set RCTRL to not PROM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) 		tempval = gfar_read(&regs->rctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) 		tempval &= ~(RCTRL_PROM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) 		gfar_write(&regs->rctrl, tempval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) 	if (dev->flags & IFF_ALLMULTI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) 		/* Set the hash to rx all multicast frames */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) 		gfar_write(&regs->igaddr0, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) 		gfar_write(&regs->igaddr1, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) 		gfar_write(&regs->igaddr2, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) 		gfar_write(&regs->igaddr3, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) 		gfar_write(&regs->igaddr4, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) 		gfar_write(&regs->igaddr5, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) 		gfar_write(&regs->igaddr6, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) 		gfar_write(&regs->igaddr7, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) 		gfar_write(&regs->gaddr0, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) 		gfar_write(&regs->gaddr1, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) 		gfar_write(&regs->gaddr2, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) 		gfar_write(&regs->gaddr3, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) 		gfar_write(&regs->gaddr4, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) 		gfar_write(&regs->gaddr5, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) 		gfar_write(&regs->gaddr6, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) 		gfar_write(&regs->gaddr7, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) 		int em_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) 		int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) 		/* zero out the hash */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) 		gfar_write(&regs->igaddr0, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) 		gfar_write(&regs->igaddr1, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) 		gfar_write(&regs->igaddr2, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) 		gfar_write(&regs->igaddr3, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) 		gfar_write(&regs->igaddr4, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) 		gfar_write(&regs->igaddr5, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) 		gfar_write(&regs->igaddr6, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) 		gfar_write(&regs->igaddr7, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) 		gfar_write(&regs->gaddr0, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) 		gfar_write(&regs->gaddr1, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) 		gfar_write(&regs->gaddr2, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) 		gfar_write(&regs->gaddr3, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) 		gfar_write(&regs->gaddr4, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) 		gfar_write(&regs->gaddr5, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) 		gfar_write(&regs->gaddr6, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) 		gfar_write(&regs->gaddr7, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) 		/* If we have extended hash tables, we need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) 		 * clear the exact match registers to prepare for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) 		 * setting them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) 		if (priv->extended_hash) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) 			em_num = GFAR_EM_NUM + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) 			gfar_clear_exact_match(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) 			idx = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) 			idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) 			em_num = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) 		if (netdev_mc_empty(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) 		/* Parse the list, and set the appropriate bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) 		netdev_for_each_mc_addr(ha, dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) 			if (idx < em_num) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) 				gfar_set_mac_for_addr(dev, idx, ha->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) 				idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) 			} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) 				gfar_set_hash_for_addr(dev, ha->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) }
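
/* A condensed sketch of the non-IFF_ALLMULTI path above, assuming a device
 * with extended hash support (em_num = GFAR_EM_NUM + 1, idx starting at 1):
 *
 *	idx = 1;
 *	netdev_for_each_mc_addr(ha, dev) {
 *		if (idx < GFAR_EM_NUM + 1)	// spare exact-match slot left
 *			gfar_set_mac_for_addr(dev, idx++, ha->addr);
 *		else				// overflow goes to the CRC hash
 *			gfar_set_hash_for_addr(dev, ha->addr);
 *	}
 *
 * The hash is only a coarse filter: different addresses can alias onto the
 * same igaddr/gaddr bit, so frames for unsubscribed groups may still arrive
 * and are filtered again in software by the network stack.
 */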
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) void gfar_mac_reset(struct gfar_private *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) 	u32 tempval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) 	/* Reset MAC layer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) 	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) 	/* We need to delay at least 3 TX clocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) 	udelay(3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) 	/* the soft reset bit is not self-resetting, so we need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) 	 * clear it before resuming normal operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) 	gfar_write(&regs->maccfg1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) 	udelay(3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) 	gfar_rx_offload_en(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) 	/* Initialize the max receive frame/buffer lengths */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) 	gfar_write(&regs->maxfrm, GFAR_JUMBO_FRAME_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) 	gfar_write(&regs->mrblr, GFAR_RXB_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) 	/* Initialize the Minimum Frame Length Register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) 	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) 	/* Initialize MACCFG2. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) 	tempval = MACCFG2_INIT_SETTINGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) 	/* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) 	 * are marked as truncated.  Avoid this by setting MACCFG2[Huge Frame]=1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) 	 * and by checking RxBD[LG], discarding frames larger than MAXFRM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) 	if (gfar_has_errata(priv, GFAR_ERRATA_74))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) 		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) 	gfar_write(&regs->maccfg2, tempval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) 	/* Clear mac addr hash registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) 	gfar_write(&regs->igaddr0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) 	gfar_write(&regs->igaddr1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) 	gfar_write(&regs->igaddr2, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) 	gfar_write(&regs->igaddr3, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) 	gfar_write(&regs->igaddr4, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) 	gfar_write(&regs->igaddr5, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) 	gfar_write(&regs->igaddr6, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) 	gfar_write(&regs->igaddr7, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) 	gfar_write(&regs->gaddr0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) 	gfar_write(&regs->gaddr1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) 	gfar_write(&regs->gaddr2, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) 	gfar_write(&regs->gaddr3, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) 	gfar_write(&regs->gaddr4, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) 	gfar_write(&regs->gaddr5, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) 	gfar_write(&regs->gaddr6, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) 	gfar_write(&regs->gaddr7, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) 	if (priv->extended_hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) 		gfar_clear_exact_match(priv->ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) 	gfar_mac_rx_config(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) 	gfar_mac_tx_config(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) 	gfar_set_mac_address(priv->ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) 	gfar_set_multi(priv->ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) 	/* clear ievent and imask before configuring coalescing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) 	gfar_ints_disable(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) 	/* Configure the coalescing support */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) 	gfar_configure_coalescing_all(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) }
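
/* A concrete reading of the eTSEC74 workaround above, assuming maxfrm stays
 * at GFAR_JUMBO_FRAME_SIZE (9600): an affected controller would flag 9599-
 * and 9600-byte frames as truncated, so MACCFG2[Huge Frame] is set to let
 * them through and the Rx path instead relies on RxBD[LG] plus its own
 * length check to drop anything genuinely longer than maxfrm.
 */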
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) static void gfar_hw_init(struct gfar_private *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) 	u32 attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) 	/* Stop the DMA engine now, in case it was running before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) 	 * (the firmware could have used it and left it running).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) 	gfar_halt(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) 	gfar_mac_reset(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) 	/* Zero out the rmon mib registers if it has them */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) 		memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) 		/* Mask off the CAM interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) 		gfar_write(&regs->rmon.cam1, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) 		gfar_write(&regs->rmon.cam2, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) 	/* Initialize ECNTRL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) 	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) 	/* Set the extraction length and index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) 	attrs = ATTRELI_EL(priv->rx_stash_size) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) 		ATTRELI_EI(priv->rx_stash_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) 	gfar_write(&regs->attreli, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) 	/* Start with defaults, and add stashing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) 	 * depending on driver parameters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) 	attrs = ATTR_INIT_SETTINGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) 	if (priv->bd_stash_en)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) 		attrs |= ATTR_BDSTASH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) 	if (priv->rx_stash_size != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) 		attrs |= ATTR_BUFSTASH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) 	gfar_write(&regs->attr, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) 	/* FIFO configs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) 	gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) 	gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) 	gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) 	/* Program the interrupt steering regs, only for MG devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) 	if (priv->num_grps > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) 		gfar_write_isrg(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) static const struct net_device_ops gfar_netdev_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) 	.ndo_open = gfar_enet_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) 	.ndo_start_xmit = gfar_start_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) 	.ndo_stop = gfar_close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) 	.ndo_change_mtu = gfar_change_mtu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) 	.ndo_set_features = gfar_set_features,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) 	.ndo_set_rx_mode = gfar_set_multi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) 	.ndo_tx_timeout = gfar_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) 	.ndo_do_ioctl = gfar_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) 	.ndo_get_stats = gfar_get_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) 	.ndo_change_carrier = fixed_phy_change_carrier,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) 	.ndo_set_mac_address = gfar_set_mac_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) 	.ndo_validate_addr = eth_validate_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) #ifdef CONFIG_NET_POLL_CONTROLLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) 	.ndo_poll_controller = gfar_netpoll,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) };
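
/* These hooks are the driver's entry points from the core network stack:
 * for example, toggling IFF_PROMISC or IFF_ALLMULTI ends up in
 * dev_set_rx_mode(), which calls .ndo_set_rx_mode, i.e. gfar_set_multi()
 * above; "ip link set ... up" lands in gfar_enet_open(); and a stalled Tx
 * queue makes the netdev watchdog call gfar_timeout() once watchdog_timeo
 * (set in gfar_probe() below) expires.
 */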
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) /* Set up the ethernet device structure, private data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316)  * and anything else we need before we start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) static int gfar_probe(struct platform_device *ofdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) 	struct device_node *np = ofdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) 	struct net_device *dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) 	struct gfar_private *priv = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) 	int err = 0, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) 	err = gfar_of_init(ofdev, &dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) 	priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) 	priv->ndev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) 	priv->ofdev = ofdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) 	priv->dev = &ofdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) 	SET_NETDEV_DEV(dev, &ofdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) 	INIT_WORK(&priv->reset_task, gfar_reset_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) 	platform_set_drvdata(ofdev, priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) 	gfar_detect_errata(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) 	/* Set the dev->base_addr to the gfar reg region */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) 	dev->base_addr = (unsigned long) priv->gfargrp[0].regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) 	/* Fill in the dev structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) 	dev->watchdog_timeo = TX_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) 	/* MTU range: 50 - 9586 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) 	dev->mtu = 1500;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) 	dev->min_mtu = 50;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) 	dev->max_mtu = GFAR_JUMBO_FRAME_SIZE - ETH_HLEN;
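	/* With GFAR_JUMBO_FRAME_SIZE = 9600 and ETH_HLEN = 14 this works out
	 * to the 9586 quoted in the MTU range comment above.
	 */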
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) 	dev->netdev_ops = &gfar_netdev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) 	dev->ethtool_ops = &gfar_ethtool_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) 	/* Register NAPI contexts for each interrupt group */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) 	for (i = 0; i < priv->num_grps; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) 		if (priv->poll_mode == GFAR_SQ_POLLING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) 			netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) 				       gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) 			netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) 				       gfar_poll_tx_sq, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) 			netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) 				       gfar_poll_rx, GFAR_DEV_WEIGHT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) 			netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) 				       gfar_poll_tx, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) 		dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) 				   NETIF_F_RXCSUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) 		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) 				 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) 		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) 				    NETIF_F_HW_VLAN_CTAG_RX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) 		dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) 	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) 	gfar_init_addr_hash_table(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) 	/* Insert receive timestamps into the padding alignment bytes, plus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) 	 * 2 bytes of padding to ensure CPU alignment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) 		priv->padding = 8 + DEFAULT_PADDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) 	if (dev->features & NETIF_F_IP_CSUM ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) 	    priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) 		dev->needed_headroom = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) 	/* Initializing some of the rx/tx queue level parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) 	for (i = 0; i < priv->num_tx_queues; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) 		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) 		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) 		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) 		priv->tx_queue[i]->txic = DEFAULT_TXIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) 	for (i = 0; i < priv->num_rx_queues; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) 		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) 		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) 		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) 	/* Always enable rx filer if available */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) 	priv->rx_filer_enable =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) 	    (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) 	/* Enable most messages by default */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) 	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
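	/* Worked example of the bit trick above, assuming the standard
	 * netif_msg enum (DRV=0x01, PROBE=0x02, LINK=0x04, TIMER=0x08,
	 * IFDOWN=0x10, IFUP=0x20): (0x20 << 1) - 1 = 0x3f, i.e. every
	 * message class up to and including NETIF_MSG_IFUP is enabled.
	 */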
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) 	/* use priority h/w tx queue scheduling for single queue devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) 	if (priv->num_tx_queues == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) 		priv->prio_sched_en = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) 	set_bit(GFAR_DOWN, &priv->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) 	gfar_hw_init(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) 	/* Carrier starts down, phylib will bring it up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) 	netif_carrier_off(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) 	err = register_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) 		pr_err("%s: Cannot register net device, aborting\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) 		goto register_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) 		priv->wol_supported |= GFAR_WOL_MAGIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) 	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) 	    priv->rx_filer_enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) 		priv->wol_supported |= GFAR_WOL_FILER_UCAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) 	device_set_wakeup_capable(&ofdev->dev, priv->wol_supported);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) 	/* fill out IRQ number and name fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) 	for (i = 0; i < priv->num_grps; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) 		struct gfar_priv_grp *grp = &priv->gfargrp[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) 		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) 			sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) 				dev->name, "_g", '0' + i, "_tx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) 			sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) 				dev->name, "_g", '0' + i, "_rx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) 			sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) 				dev->name, "_g", '0' + i, "_er");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) 			strcpy(gfar_irq(grp, TX)->name, dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) 	}
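
	/* For a multi-interrupt eTSEC registered as eth0 this yields the
	 * per-group names eth0_g0_tx, eth0_g0_rx and eth0_g0_er (eth0_g1_*
	 * for a second group), which is how the vectors are later labelled
	 * when the IRQs are requested; single-interrupt devices simply
	 * reuse the interface name.
	 */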
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) 	/* Initialize the filer table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) 	gfar_init_filer_table(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) 	/* Print out the device info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) 	netdev_info(dev, "mac: %pM\n", dev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) 	/* Even more device info helps when determining which kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) 	 * provided which set of benchmarks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) 	netdev_info(dev, "Running with NAPI enabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) 	for (i = 0; i < priv->num_rx_queues; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) 		netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) 			    i, priv->rx_queue[i]->rx_ring_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) 	for (i = 0; i < priv->num_tx_queues; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) 		netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) 			    i, priv->tx_queue[i]->tx_ring_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) register_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) 	if (of_phy_is_fixed_link(np))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) 		of_phy_deregister_fixed_link(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) 	unmap_group_regs(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) 	gfar_free_rx_queues(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) 	gfar_free_tx_queues(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) 	of_node_put(priv->phy_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) 	of_node_put(priv->tbi_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) 	free_gfar_dev(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) static int gfar_remove(struct platform_device *ofdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) 	struct gfar_private *priv = platform_get_drvdata(ofdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) 	struct device_node *np = ofdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) 	of_node_put(priv->phy_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) 	of_node_put(priv->tbi_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) 	unregister_netdev(priv->ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) 	if (of_phy_is_fixed_link(np))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) 		of_phy_deregister_fixed_link(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) 	unmap_group_regs(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) 	gfar_free_rx_queues(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) 	gfar_free_tx_queues(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) 	free_gfar_dev(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) #ifdef CONFIG_PM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) static void __gfar_filer_disable(struct gfar_private *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) 	u32 temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) 	temp = gfar_read(&regs->rctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) 	temp &= ~(RCTRL_FILREN | RCTRL_PRSDEP_INIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) 	gfar_write(&regs->rctrl, temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) static void __gfar_filer_enable(struct gfar_private *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) 	u32 temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) 	temp = gfar_read(&regs->rctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) 	temp |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) 	gfar_write(&regs->rctrl, temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) /* Filer rules implementing wol capabilities */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) static void gfar_filer_config_wol(struct gfar_private *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) 	u32 rqfcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) 	__gfar_filer_disable(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) 	/* clear the filer table, reject any packet by default */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) 	rqfcr = RQFCR_RJE | RQFCR_CMP_MATCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) 	for (i = 0; i <= MAX_FILER_IDX; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) 		gfar_write_filer(priv, i, rqfcr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) 	i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) 	if (priv->wol_opts & GFAR_WOL_FILER_UCAST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) 		/* unicast packet, accept it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) 		struct net_device *ndev = priv->ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) 		/* get the default rx queue index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) 		u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) 		u32 dest_mac_addr = (ndev->dev_addr[0] << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) 				    (ndev->dev_addr[1] << 8) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) 				     ndev->dev_addr[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) 		rqfcr = (qindex << 10) | RQFCR_AND |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) 			RQFCR_CMP_EXACT | RQFCR_PID_DAH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) 		gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) 		dest_mac_addr = (ndev->dev_addr[3] << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) 				(ndev->dev_addr[4] << 8) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) 				 ndev->dev_addr[5];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) 		rqfcr = (qindex << 10) | RQFCR_GPI |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) 			RQFCR_CMP_EXACT | RQFCR_PID_DAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) 		gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) 	__gfar_filer_enable(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) }
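
/* How the unicast wake rule above encodes a destination MAC, using the
 * purely hypothetical address 00:04:9f:01:02:03 as an illustration: the
 * first filer entry compares DAH against 0x00049f and carries RQFCR_AND,
 * so it only accepts if the next entry matches too; the second compares
 * DAL against 0x010203 and carries RQFCR_GPI, so a full match raises the
 * filer general purpose interrupt (FGPI) that wakes the core.  Both
 * entries steer the frame to the group's default Rx queue through the
 * qindex value packed in at bit 10 of rqfcr.
 */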
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) static void gfar_filer_restore_table(struct gfar_private *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) 	u32 rqfcr, rqfpr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) 	__gfar_filer_disable(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) 	for (i = 0; i <= MAX_FILER_IDX; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) 		rqfcr = priv->ftp_rqfcr[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) 		rqfpr = priv->ftp_rqfpr[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) 		gfar_write_filer(priv, i, rqfcr, rqfpr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) 	__gfar_filer_enable(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) /* gfar_start() for Rx only and with the FGPI filer interrupt enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) static void gfar_start_wol_filer(struct gfar_private *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) 	u32 tempval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) 	int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) 	/* Enable Rx hw queues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) 	gfar_write(&regs->rqueue, priv->rqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) 	/* Initialize DMACTRL to have WWR and WOP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) 	tempval = gfar_read(&regs->dmactrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) 	tempval |= DMACTRL_INIT_SETTINGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) 	gfar_write(&regs->dmactrl, tempval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) 	/* Make sure we aren't stopped */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) 	tempval = gfar_read(&regs->dmactrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) 	tempval &= ~DMACTRL_GRS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) 	gfar_write(&regs->dmactrl, tempval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) 	for (i = 0; i < priv->num_grps; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) 		regs = priv->gfargrp[i].regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) 		/* Clear RHLT, so that the DMA starts polling now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) 		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) 		/* enable the Filer General Purpose Interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) 		gfar_write(&regs->imask, IMASK_FGPI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) 	/* Enable Rx DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) 	tempval = gfar_read(&regs->maccfg1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) 	tempval |= MACCFG1_RX_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) 	gfar_write(&regs->maccfg1, tempval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) static int gfar_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) 	struct gfar_private *priv = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) 	struct net_device *ndev = priv->ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) 	u32 tempval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) 	u16 wol = priv->wol_opts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) 	if (!netif_running(ndev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) 	disable_napi(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) 	netif_tx_lock(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) 	netif_device_detach(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) 	netif_tx_unlock(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) 	gfar_halt(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) 	if (wol & GFAR_WOL_MAGIC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) 		/* Enable interrupt on Magic Packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) 		gfar_write(&regs->imask, IMASK_MAG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) 		/* Enable Magic Packet mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) 		tempval = gfar_read(&regs->maccfg2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) 		tempval |= MACCFG2_MPEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) 		gfar_write(&regs->maccfg2, tempval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) 		/* re-enable the Rx block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) 		tempval = gfar_read(&regs->maccfg1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) 		tempval |= MACCFG1_RX_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) 		gfar_write(&regs->maccfg1, tempval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) 	} else if (wol & GFAR_WOL_FILER_UCAST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) 		gfar_filer_config_wol(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) 		gfar_start_wol_filer(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) 		phy_stop(ndev->phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) static int gfar_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) 	struct gfar_private *priv = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) 	struct net_device *ndev = priv->ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) 	u32 tempval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) 	u16 wol = priv->wol_opts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) 	if (!netif_running(ndev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) 	if (wol & GFAR_WOL_MAGIC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) 		/* Disable Magic Packet mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) 		tempval = gfar_read(&regs->maccfg2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) 		tempval &= ~MACCFG2_MPEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) 		gfar_write(&regs->maccfg2, tempval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) 	} else if (wol & GFAR_WOL_FILER_UCAST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) 		/* need to stop rx only, tx is already down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) 		gfar_halt(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) 		gfar_filer_restore_table(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) 		phy_start(ndev->phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) 	gfar_start(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) 	netif_device_attach(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) 	enable_napi(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) static int gfar_restore(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) 	struct gfar_private *priv = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) 	struct net_device *ndev = priv->ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) 	if (!netif_running(ndev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) 		netif_device_attach(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) 	gfar_init_bds(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) 	gfar_mac_reset(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) 	gfar_init_tx_rx_base(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) 	gfar_start(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) 	priv->oldlink = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) 	priv->oldspeed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) 	priv->oldduplex = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) 	if (ndev->phydev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) 		phy_start(ndev->phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) 	netif_device_attach(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) 	enable_napi(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) static const struct dev_pm_ops gfar_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) 	.suspend = gfar_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) 	.resume = gfar_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) 	.freeze = gfar_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) 	.thaw = gfar_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) 	.restore = gfar_restore,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) #define GFAR_PM_OPS (&gfar_pm_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) #define GFAR_PM_OPS NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) static const struct of_device_id gfar_match[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) 		.type = "network",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) 		.compatible = "gianfar",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) 		.compatible = "fsl,etsec2",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) 	{},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) MODULE_DEVICE_TABLE(of, gfar_match);
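
/* A minimal, purely illustrative device-tree node that the second entry
 * above would bind against (the unit address and any other properties are
 * made up for the example):
 *
 *	ethernet@24000 {
 *		compatible = "fsl,etsec2";
 *		...
 *	};
 *
 * The first entry instead matches older trees that describe the controller
 * with device_type = "network" together with compatible = "gianfar".
 */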
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) /* Structure for a device driver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) static struct platform_driver gfar_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) 	.driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) 		.name = "fsl-gianfar",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) 		.pm = GFAR_PM_OPS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) 		.of_match_table = gfar_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) 	.probe = gfar_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) 	.remove = gfar_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) module_platform_driver(gfar_driver);
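
/* module_platform_driver() is shorthand for the usual module init/exit
 * boilerplate; expanded it is roughly:
 *
 *	static int __init gfar_driver_init(void)
 *	{
 *		return platform_driver_register(&gfar_driver);
 *	}
 *	module_init(gfar_driver_init);
 *
 *	static void __exit gfar_driver_exit(void)
 *	{
 *		platform_driver_unregister(&gfar_driver);
 *	}
 *	module_exit(gfar_driver_exit);
 */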