^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Aeroflex Gaisler GRETH 10/100/1G Ethernet MAC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * 2005-2010 (c) Aeroflex Gaisler AB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * This driver supports GRETH 10/100 and GRETH 10/100/1G Ethernet MACs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * available in the GRLIB VHDL IP core library.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * Full documentation of both cores can be found here:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * https://www.gaisler.com/products/grlib/grip.pdf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) * The Gigabit version supports scatter/gather DMA, any alignment of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) * buffers and checksum offloading.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) * Contributors: Kristoffer Glembo
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) * Daniel Hellstrom
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) * Marko Isomaki
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/netdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <linux/etherdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <linux/ethtool.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <linux/skbuff.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <linux/crc32.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <linux/mii.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include <linux/of_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include <linux/of_net.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #include <linux/of_platform.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #include <asm/cacheflush.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #include <asm/byteorder.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #ifdef CONFIG_SPARC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #include <asm/idprom.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #include "greth.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #define GRETH_DEF_MSG_ENABLE \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) (NETIF_MSG_DRV | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) NETIF_MSG_PROBE | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) NETIF_MSG_LINK | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) NETIF_MSG_IFDOWN | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) NETIF_MSG_IFUP | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) NETIF_MSG_RX_ERR | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) NETIF_MSG_TX_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) static int greth_debug = -1; /* -1 == use GRETH_DEF_MSG_ENABLE as value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) module_param(greth_debug, int, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) MODULE_PARM_DESC(greth_debug, "GRETH bitmapped debugging message enable value");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) /* Accept MAC address of the form macaddr=0x08,0x00,0x20,0x30,0x40,0x50 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) static int macaddr[6];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) module_param_array(macaddr, int, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) MODULE_PARM_DESC(macaddr, "GRETH Ethernet MAC address");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) static int greth_edcl = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) module_param(greth_edcl, int, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) MODULE_PARM_DESC(greth_edcl, "GRETH EDCL usage indicator. Set to 1 if EDCL is used.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) static int greth_open(struct net_device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) static netdev_tx_t greth_start_xmit(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) struct net_device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) static netdev_tx_t greth_start_xmit_gbit(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) struct net_device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) static int greth_rx(struct net_device *dev, int limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) static int greth_rx_gbit(struct net_device *dev, int limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) static void greth_clean_tx(struct net_device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) static void greth_clean_tx_gbit(struct net_device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) static irqreturn_t greth_interrupt(int irq, void *dev_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) static int greth_close(struct net_device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) static int greth_set_mac_add(struct net_device *dev, void *p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) static void greth_set_multicast_list(struct net_device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) #define GRETH_REGLOAD(a) (be32_to_cpu(__raw_readl(&(a))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) #define GRETH_REGSAVE(a, v) (__raw_writel(cpu_to_be32(v), &(a)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) #define GRETH_REGORIN(a, v) (GRETH_REGSAVE(a, (GRETH_REGLOAD(a) | (v))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) #define GRETH_REGANDIN(a, v) (GRETH_REGSAVE(a, (GRETH_REGLOAD(a) & (v))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) #define NEXT_TX(N) (((N) + 1) & GRETH_TXBD_NUM_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) #define SKIP_TX(N, C) (((N) + C) & GRETH_TXBD_NUM_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) #define NEXT_RX(N) (((N) + 1) & GRETH_RXBD_NUM_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) static void greth_print_rx_packet(void *addr, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) print_hex_dump(KERN_DEBUG, "RX: ", DUMP_PREFIX_OFFSET, 16, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) addr, len, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) static void greth_print_tx_packet(struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) int length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) if (skb_shinfo(skb)->nr_frags == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) length = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) length = skb_headlen(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) skb->data, length, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) skb_frag_address(&skb_shinfo(skb)->frags[i]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) skb_frag_size(&skb_shinfo(skb)->frags[i]), true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) static inline void greth_enable_tx(struct greth_private *greth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) GRETH_REGORIN(greth->regs->control, GRETH_TXEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) static inline void greth_enable_tx_and_irq(struct greth_private *greth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) wmb(); /* BDs must been written to memory before enabling TX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) GRETH_REGORIN(greth->regs->control, GRETH_TXEN | GRETH_TXI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) static inline void greth_disable_tx(struct greth_private *greth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) GRETH_REGANDIN(greth->regs->control, ~GRETH_TXEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) static inline void greth_enable_rx(struct greth_private *greth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) GRETH_REGORIN(greth->regs->control, GRETH_RXEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) static inline void greth_disable_rx(struct greth_private *greth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) GRETH_REGANDIN(greth->regs->control, ~GRETH_RXEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) static inline void greth_enable_irqs(struct greth_private *greth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) GRETH_REGORIN(greth->regs->control, GRETH_RXI | GRETH_TXI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) static inline void greth_disable_irqs(struct greth_private *greth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) GRETH_REGANDIN(greth->regs->control, ~(GRETH_RXI|GRETH_TXI));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) static inline void greth_write_bd(u32 *bd, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) __raw_writel(cpu_to_be32(val), bd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) static inline u32 greth_read_bd(u32 *bd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) return be32_to_cpu(__raw_readl(bd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164)
/*
 * Free every RX/TX buffer and drop its DMA mapping, for both MAC flavours.
 *
 * Gigabit MAC: the RX ring holds one SKB per descriptor, and the TX side
 * walks the not-yet-reclaimed window tracked by tx_last/tx_free, where a
 * single SKB may span 1 + nr_frags descriptors.  10/100 MAC: both rings
 * use the fixed kmalloc'd bounce buffers in rx_bufs[]/tx_bufs[].  In all
 * cases the DMA address is recovered from the buffer descriptor itself.
 */
static void greth_clean_rings(struct greth_private *greth)
{
	int i;
	struct greth_bd *rx_bdp = greth->rx_bd_base;
	struct greth_bd *tx_bdp = greth->tx_bd_base;

	if (greth->gbit_mac) {

		/* Free and unmap RX buffers */
		for (i = 0; i < GRETH_RXBD_NUM; i++, rx_bdp++) {
			if (greth->rx_skbuff[i] != NULL) {
				/* NOTE(review): the skb is freed before its
				 * mapping is torn down; the unmap only uses
				 * the address stored in the BD, but confirm
				 * this ordering against the DMA-API docs. */
				dev_kfree_skb(greth->rx_skbuff[i]);
				dma_unmap_single(greth->dev,
						 greth_read_bd(&rx_bdp->addr),
						 MAX_FRAME_SIZE+NET_IP_ALIGN,
						 DMA_FROM_DEVICE);
			}
		}

		/* TX buffers: reclaim every still-outstanding descriptor */
		while (greth->tx_free < GRETH_TXBD_NUM) {

			struct sk_buff *skb = greth->tx_skbuff[greth->tx_last];
			int nr_frags = skb_shinfo(skb)->nr_frags;
			tx_bdp = greth->tx_bd_base + greth->tx_last;
			greth->tx_last = NEXT_TX(greth->tx_last);

			/* Linear (head) part of the frame */
			dma_unmap_single(greth->dev,
					 greth_read_bd(&tx_bdp->addr),
					 skb_headlen(skb),
					 DMA_TO_DEVICE);

			/* One descriptor per page fragment */
			for (i = 0; i < nr_frags; i++) {
				skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
				tx_bdp = greth->tx_bd_base + greth->tx_last;

				dma_unmap_page(greth->dev,
					       greth_read_bd(&tx_bdp->addr),
					       skb_frag_size(frag),
					       DMA_TO_DEVICE);

				greth->tx_last = NEXT_TX(greth->tx_last);
			}
			greth->tx_free += nr_frags+1;
			dev_kfree_skb(skb);
		}


	} else { /* 10/100 Mbps MAC */

		/* Fixed bounce buffers: free the kernel buffer and drop the
		 * mapping recorded in the corresponding descriptor. */
		for (i = 0; i < GRETH_RXBD_NUM; i++, rx_bdp++) {
			kfree(greth->rx_bufs[i]);
			dma_unmap_single(greth->dev,
					 greth_read_bd(&rx_bdp->addr),
					 MAX_FRAME_SIZE,
					 DMA_FROM_DEVICE);
		}
		for (i = 0; i < GRETH_TXBD_NUM; i++, tx_bdp++) {
			kfree(greth->tx_bufs[i]);
			dma_unmap_single(greth->dev,
					 greth_read_bd(&tx_bdp->addr),
					 MAX_FRAME_SIZE,
					 DMA_TO_DEVICE);
		}
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231)
/*
 * Allocate and map all RX/TX buffers, populate the descriptor rings and
 * point the hardware at them.
 *
 * Gigabit MAC: one SKB per RX descriptor (the core handles any buffer
 * alignment, so NET_IP_ALIGN headroom is reserved); TX descriptors start
 * empty.  10/100 MAC: fixed kmalloc'd bounce buffers for both directions,
 * since frames are copied to/from them (see greth_start_xmit()).
 *
 * Returns 0 on success or -ENOMEM after unwinding via greth_clean_rings().
 */
static int greth_init_rings(struct greth_private *greth)
{
	struct sk_buff *skb;
	struct greth_bd *rx_bd, *tx_bd;
	u32 dma_addr;
	int i;

	rx_bd = greth->rx_bd_base;
	tx_bd = greth->tx_bd_base;

	/* Initialize descriptor rings and buffers */
	if (greth->gbit_mac) {

		for (i = 0; i < GRETH_RXBD_NUM; i++) {
			skb = netdev_alloc_skb(greth->netdev, MAX_FRAME_SIZE+NET_IP_ALIGN);
			if (skb == NULL) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Error allocating DMA ring.\n");
				goto cleanup;
			}
			skb_reserve(skb, NET_IP_ALIGN);
			dma_addr = dma_map_single(greth->dev,
						  skb->data,
						  MAX_FRAME_SIZE+NET_IP_ALIGN,
						  DMA_FROM_DEVICE);

			if (dma_mapping_error(greth->dev, dma_addr)) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Could not create initial DMA mapping\n");
				goto cleanup;
			}
			greth->rx_skbuff[i] = skb;
			greth_write_bd(&rx_bd[i].addr, dma_addr);
			/* Hand the descriptor to hardware with IRQ on completion */
			greth_write_bd(&rx_bd[i].stat, GRETH_BD_EN | GRETH_BD_IE);
		}

	} else {

		/* 10/100 MAC uses a fixed set of buffers and copy to/from SKBs */
		for (i = 0; i < GRETH_RXBD_NUM; i++) {

			greth->rx_bufs[i] = kmalloc(MAX_FRAME_SIZE, GFP_KERNEL);

			if (greth->rx_bufs[i] == NULL) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Error allocating DMA ring.\n");
				goto cleanup;
			}

			dma_addr = dma_map_single(greth->dev,
						  greth->rx_bufs[i],
						  MAX_FRAME_SIZE,
						  DMA_FROM_DEVICE);

			if (dma_mapping_error(greth->dev, dma_addr)) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Could not create initial DMA mapping\n");
				goto cleanup;
			}
			greth_write_bd(&rx_bd[i].addr, dma_addr);
			greth_write_bd(&rx_bd[i].stat, GRETH_BD_EN | GRETH_BD_IE);
		}
		for (i = 0; i < GRETH_TXBD_NUM; i++) {

			greth->tx_bufs[i] = kmalloc(MAX_FRAME_SIZE, GFP_KERNEL);

			if (greth->tx_bufs[i] == NULL) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Error allocating DMA ring.\n");
				goto cleanup;
			}

			dma_addr = dma_map_single(greth->dev,
						  greth->tx_bufs[i],
						  MAX_FRAME_SIZE,
						  DMA_TO_DEVICE);

			if (dma_mapping_error(greth->dev, dma_addr)) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Could not create initial DMA mapping\n");
				goto cleanup;
			}
			greth_write_bd(&tx_bd[i].addr, dma_addr);
			/* TX descriptors stay disabled until a frame is queued */
			greth_write_bd(&tx_bd[i].stat, 0);
		}
	}
	/* Mark the last RX descriptor so the hardware wraps to ring start */
	greth_write_bd(&rx_bd[GRETH_RXBD_NUM - 1].stat,
		       greth_read_bd(&rx_bd[GRETH_RXBD_NUM - 1].stat) | GRETH_BD_WR);

	/* Initialize pointers. */
	greth->rx_cur = 0;
	greth->tx_next = 0;
	greth->tx_last = 0;
	greth->tx_free = GRETH_TXBD_NUM;

	/* Initialize descriptor base address */
	GRETH_REGSAVE(greth->regs->tx_desc_p, greth->tx_bd_base_phys);
	GRETH_REGSAVE(greth->regs->rx_desc_p, greth->rx_bd_base_phys);

	return 0;

cleanup:
	greth_clean_rings(greth);
	return -ENOMEM;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) static int greth_open(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) struct greth_private *greth = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) err = greth_init_rings(greth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) if (netif_msg_ifup(greth))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) dev_err(&dev->dev, "Could not allocate memory for DMA rings\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) err = request_irq(greth->irq, greth_interrupt, 0, "eth", (void *) dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) if (netif_msg_ifup(greth))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) dev_err(&dev->dev, "Could not allocate interrupt %d\n", dev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) greth_clean_rings(greth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) if (netif_msg_ifup(greth))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) dev_dbg(&dev->dev, " starting queue\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) netif_start_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) GRETH_REGSAVE(greth->regs->status, 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) napi_enable(&greth->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) greth_enable_irqs(greth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) greth_enable_tx(greth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) greth_enable_rx(greth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372)
/*
 * net_device stop callback: quiesce the device in strict order — stop
 * NAPI polling, mask interrupts and disable both DMA engines, then stop
 * the queue, release the IRQ and tear down the rings.  Always returns 0.
 */
static int greth_close(struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);

	napi_disable(&greth->napi);

	greth_disable_irqs(greth);
	greth_disable_tx(greth);
	greth_disable_rx(greth);

	netif_stop_queue(dev);

	free_irq(greth->irq, (void *) dev);

	/* Safe now: DMA is disabled and the IRQ/NAPI paths are quiesced */
	greth_clean_rings(greth);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391)
/*
 * ndo_start_xmit for the 10/100 MAC: the frame is copied into the
 * pre-mapped bounce buffer of the next free TX descriptor (buffers were
 * mapped once in greth_init_rings()).
 *
 * Returns NETDEV_TX_OK with the skb consumed — including the drop path
 * for oversized frames — or NETDEV_TX_BUSY (skb kept) if the ring is
 * still full after cleaning.
 */
static netdev_tx_t
greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);
	struct greth_bd *bdp;
	int err = NETDEV_TX_OK;
	u32 status, dma_addr, ctrl;
	unsigned long flags;

	/* Clean TX Ring */
	greth_clean_tx(greth->netdev);

	if (unlikely(greth->tx_free <= 0)) {
		spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
		ctrl = GRETH_REGLOAD(greth->regs->control);
		/* Enable TX IRQ only if not already in poll() routine */
		if (ctrl & GRETH_RXI)
			GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&greth->devlock, flags);
		return NETDEV_TX_BUSY;
	}

	if (netif_msg_pktdata(greth))
		greth_print_tx_packet(skb);


	if (unlikely(skb->len > MAX_FRAME_SIZE)) {
		/* Frame cannot fit in a bounce buffer: count the error and
		 * drop it (skb is freed at 'out'). */
		dev->stats.tx_errors++;
		goto out;
	}

	bdp = greth->tx_bd_base + greth->tx_next;
	dma_addr = greth_read_bd(&bdp->addr);

	/* NOTE(review): assumes the DMA address stored in the BD has a
	 * kernel linear mapping so phys_to_virt() is valid — TODO confirm
	 * for all platforms this driver targets. */
	memcpy((unsigned char *) phys_to_virt(dma_addr), skb->data, skb->len);

	dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE);

	status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN);
	greth->tx_bufs_length[greth->tx_next] = skb->len & GRETH_BD_LEN;

	/* Wrap around descriptor ring */
	if (greth->tx_next == GRETH_TXBD_NUM_MASK) {
		status |= GRETH_BD_WR;
	}

	greth->tx_next = NEXT_TX(greth->tx_next);
	greth->tx_free--;

	/* Write descriptor control word and enable transmission */
	greth_write_bd(&bdp->stat, status);
	spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
	greth_enable_tx(greth);
	spin_unlock_irqrestore(&greth->devlock, flags);

out:
	/* The frame was copied (or dropped); the skb is no longer needed */
	dev_kfree_skb(skb);
	return err;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) static inline u16 greth_num_free_bds(u16 tx_last, u16 tx_next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) if (tx_next < tx_last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) return (tx_last - tx_next) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) return GRETH_TXBD_NUM - (tx_next - tx_last) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) static netdev_tx_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) struct greth_private *greth = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) struct greth_bd *bdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) u32 status, dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) int curr_tx, nr_frags, i, err = NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) u16 tx_last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) nr_frags = skb_shinfo(skb)->nr_frags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) tx_last = greth->tx_last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) rmb(); /* tx_last is updated by the poll task */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) if (greth_num_free_bds(tx_last, greth->tx_next) < nr_frags + 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) err = NETDEV_TX_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) if (netif_msg_pktdata(greth))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) greth_print_tx_packet(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) if (unlikely(skb->len > MAX_FRAME_SIZE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) dev->stats.tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) /* Save skb pointer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) greth->tx_skbuff[greth->tx_next] = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) /* Linear buf */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) if (nr_frags != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) status = GRETH_TXBD_MORE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) status = GRETH_BD_IE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) if (skb->ip_summed == CHECKSUM_PARTIAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) status |= GRETH_TXBD_CSALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) status |= skb_headlen(skb) & GRETH_BD_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) if (greth->tx_next == GRETH_TXBD_NUM_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) status |= GRETH_BD_WR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) bdp = greth->tx_bd_base + greth->tx_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) greth_write_bd(&bdp->stat, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) dma_addr = dma_map_single(greth->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) goto map_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) greth_write_bd(&bdp->addr, dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) curr_tx = NEXT_TX(greth->tx_next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) /* Frags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) for (i = 0; i < nr_frags; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) greth->tx_skbuff[curr_tx] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) bdp = greth->tx_bd_base + curr_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) status = GRETH_BD_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) if (skb->ip_summed == CHECKSUM_PARTIAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) status |= GRETH_TXBD_CSALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) status |= skb_frag_size(frag) & GRETH_BD_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) /* Wrap around descriptor ring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) if (curr_tx == GRETH_TXBD_NUM_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) status |= GRETH_BD_WR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) /* More fragments left */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) if (i < nr_frags - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) status |= GRETH_TXBD_MORE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) status |= GRETH_BD_IE; /* enable IRQ on last fragment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) greth_write_bd(&bdp->stat, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) dma_addr = skb_frag_dma_map(greth->dev, frag, 0, skb_frag_size(frag),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) goto frag_map_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) greth_write_bd(&bdp->addr, dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) curr_tx = NEXT_TX(curr_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) /* Enable the descriptor chain by enabling the first descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) bdp = greth->tx_bd_base + greth->tx_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) greth_write_bd(&bdp->stat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) greth_read_bd(&bdp->stat) | GRETH_BD_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) greth->tx_next = curr_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) greth_enable_tx_and_irq(greth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) spin_unlock_irqrestore(&greth->devlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) frag_map_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) /* Unmap SKB mappings that succeeded and disable descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) for (i = 0; greth->tx_next + i != curr_tx; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) bdp = greth->tx_bd_base + greth->tx_next + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) dma_unmap_single(greth->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) greth_read_bd(&bdp->addr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) greth_read_bd(&bdp->stat) & GRETH_BD_LEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) greth_write_bd(&bdp->stat, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) map_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) if (net_ratelimit())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) dev_warn(greth->dev, "Could not create TX DMA mapping\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) static irqreturn_t greth_interrupt(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) struct net_device *dev = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) struct greth_private *greth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) u32 status, ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) irqreturn_t retval = IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) greth = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) spin_lock(&greth->devlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) /* Get the interrupt events that caused us to be here. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) status = GRETH_REGLOAD(greth->regs->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) /* Must see if interrupts are enabled also, INT_TX|INT_RX flags may be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) * set regardless of whether IRQ is enabled or not. Especially
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) * important when shared IRQ.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) ctrl = GRETH_REGLOAD(greth->regs->control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) /* Handle rx and tx interrupts through poll */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) if (((status & (GRETH_INT_RE | GRETH_INT_RX)) && (ctrl & GRETH_RXI)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) ((status & (GRETH_INT_TE | GRETH_INT_TX)) && (ctrl & GRETH_TXI))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) retval = IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) /* Disable interrupts and schedule poll() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) greth_disable_irqs(greth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) napi_schedule(&greth->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) spin_unlock(&greth->devlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) static void greth_clean_tx(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) struct greth_private *greth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) struct greth_bd *bdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) u32 stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) greth = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) bdp = greth->tx_bd_base + greth->tx_last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) stat = greth_read_bd(&bdp->stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) if (unlikely(stat & GRETH_BD_EN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) if (greth->tx_free == GRETH_TXBD_NUM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) /* Check status for errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) if (unlikely(stat & GRETH_TXBD_STATUS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) dev->stats.tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) if (stat & GRETH_TXBD_ERR_AL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) dev->stats.tx_aborted_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) if (stat & GRETH_TXBD_ERR_UE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) dev->stats.tx_fifo_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) dev->stats.tx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) dev->stats.tx_bytes += greth->tx_bufs_length[greth->tx_last];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) greth->tx_last = NEXT_TX(greth->tx_last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) greth->tx_free++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) if (greth->tx_free > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) static inline void greth_update_tx_stats(struct net_device *dev, u32 stat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) /* Check status for errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) if (unlikely(stat & GRETH_TXBD_STATUS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) dev->stats.tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) if (stat & GRETH_TXBD_ERR_AL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) dev->stats.tx_aborted_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) if (stat & GRETH_TXBD_ERR_UE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) dev->stats.tx_fifo_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) if (stat & GRETH_TXBD_ERR_LC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) dev->stats.tx_aborted_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) dev->stats.tx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) static void greth_clean_tx_gbit(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) struct greth_private *greth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) struct greth_bd *bdp, *bdp_last_frag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) struct sk_buff *skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) u32 stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) int nr_frags, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) u16 tx_last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) greth = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) tx_last = greth->tx_last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) while (tx_last != greth->tx_next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) skb = greth->tx_skbuff[tx_last];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) nr_frags = skb_shinfo(skb)->nr_frags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) /* We only clean fully completed SKBs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) bdp_last_frag = greth->tx_bd_base + SKIP_TX(tx_last, nr_frags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) stat = greth_read_bd(&bdp_last_frag->stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) if (stat & GRETH_BD_EN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) greth->tx_skbuff[tx_last] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) greth_update_tx_stats(dev, stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) dev->stats.tx_bytes += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) bdp = greth->tx_bd_base + tx_last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) tx_last = NEXT_TX(tx_last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) dma_unmap_single(greth->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) greth_read_bd(&bdp->addr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) skb_headlen(skb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) for (i = 0; i < nr_frags; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) bdp = greth->tx_bd_base + tx_last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) dma_unmap_page(greth->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) greth_read_bd(&bdp->addr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) skb_frag_size(frag),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) tx_last = NEXT_TX(tx_last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) if (skb) { /* skb is set only if the above while loop was entered */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) greth->tx_last = tx_last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) if (netif_queue_stopped(dev) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) (greth_num_free_bds(tx_last, greth->tx_next) >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) (MAX_SKB_FRAGS+1)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) static int greth_rx(struct net_device *dev, int limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) struct greth_private *greth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) struct greth_bd *bdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) int pkt_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) int bad, count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) u32 status, dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) greth = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) for (count = 0; count < limit; ++count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) bdp = greth->rx_bd_base + greth->rx_cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) status = greth_read_bd(&bdp->stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) if (unlikely(status & GRETH_BD_EN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) dma_addr = greth_read_bd(&bdp->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) bad = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) /* Check status for errors. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) if (unlikely(status & GRETH_RXBD_STATUS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) if (status & GRETH_RXBD_ERR_FT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) dev->stats.rx_length_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) bad = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) if (status & (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) dev->stats.rx_frame_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) bad = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) if (status & GRETH_RXBD_ERR_CRC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) dev->stats.rx_crc_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) bad = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) if (unlikely(bad)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) dev->stats.rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) pkt_len = status & GRETH_BD_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) if (unlikely(skb == NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) if (net_ratelimit())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) dev_warn(&dev->dev, "low on memory - " "packet dropped\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) dev->stats.rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) skb_reserve(skb, NET_IP_ALIGN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) dma_sync_single_for_cpu(greth->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) pkt_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) if (netif_msg_pktdata(greth))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) greth_print_rx_packet(phys_to_virt(dma_addr), pkt_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) skb_put_data(skb, phys_to_virt(dma_addr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) pkt_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) skb->protocol = eth_type_trans(skb, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) dev->stats.rx_bytes += pkt_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) dev->stats.rx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) netif_receive_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) status = GRETH_BD_EN | GRETH_BD_IE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) if (greth->rx_cur == GRETH_RXBD_NUM_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) status |= GRETH_BD_WR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) greth_write_bd(&bdp->stat, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) spin_lock_irqsave(&greth->devlock, flags); /* save from XMIT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) greth_enable_rx(greth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) spin_unlock_irqrestore(&greth->devlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) greth->rx_cur = NEXT_RX(greth->rx_cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) static inline int hw_checksummed(u32 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) if (status & GRETH_RXBD_IP_FRAG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) if (status & GRETH_RXBD_IP && status & GRETH_RXBD_IP_CSERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) if (status & GRETH_RXBD_UDP && status & GRETH_RXBD_UDP_CSERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) if (status & GRETH_RXBD_TCP && status & GRETH_RXBD_TCP_CSERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) static int greth_rx_gbit(struct net_device *dev, int limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) struct greth_private *greth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) struct greth_bd *bdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) struct sk_buff *skb, *newskb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) int pkt_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) int bad, count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) u32 status, dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) greth = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) for (count = 0; count < limit; ++count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) bdp = greth->rx_bd_base + greth->rx_cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) skb = greth->rx_skbuff[greth->rx_cur];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) status = greth_read_bd(&bdp->stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) bad = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) if (status & GRETH_BD_EN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) /* Check status for errors. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) if (unlikely(status & GRETH_RXBD_STATUS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) if (status & GRETH_RXBD_ERR_FT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) dev->stats.rx_length_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) bad = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) } else if (status &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE | GRETH_RXBD_ERR_LE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) dev->stats.rx_frame_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) bad = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) } else if (status & GRETH_RXBD_ERR_CRC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) dev->stats.rx_crc_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) bad = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) /* Allocate new skb to replace current, not needed if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) * current skb can be reused */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) if (!bad && (newskb=netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) skb_reserve(newskb, NET_IP_ALIGN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) dma_addr = dma_map_single(greth->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) newskb->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) MAX_FRAME_SIZE + NET_IP_ALIGN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) if (!dma_mapping_error(greth->dev, dma_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) /* Process the incoming frame. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) pkt_len = status & GRETH_BD_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) dma_unmap_single(greth->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) greth_read_bd(&bdp->addr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) MAX_FRAME_SIZE + NET_IP_ALIGN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) if (netif_msg_pktdata(greth))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) greth_print_rx_packet(phys_to_virt(greth_read_bd(&bdp->addr)), pkt_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) skb_put(skb, pkt_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) if (dev->features & NETIF_F_RXCSUM && hw_checksummed(status))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) skb->ip_summed = CHECKSUM_UNNECESSARY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) skb_checksum_none_assert(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) skb->protocol = eth_type_trans(skb, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) dev->stats.rx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) dev->stats.rx_bytes += pkt_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) netif_receive_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) greth->rx_skbuff[greth->rx_cur] = newskb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) greth_write_bd(&bdp->addr, dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) if (net_ratelimit())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) dev_warn(greth->dev, "Could not create DMA mapping, dropping packet\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) dev_kfree_skb(newskb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) /* reusing current skb, so it is a drop */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) dev->stats.rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) } else if (bad) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) /* Bad Frame transfer, the skb is reused */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) dev->stats.rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) /* Failed Allocating a new skb. This is rather stupid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) * but the current "filled" skb is reused, as if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) * transfer failure. One could argue that RX descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) * table handling should be divided into cleaning and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) * filling as the TX part of the driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) if (net_ratelimit())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) dev_warn(greth->dev, "Could not allocate SKB, dropping packet\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) /* reusing current skb, so it is a drop */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) dev->stats.rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) status = GRETH_BD_EN | GRETH_BD_IE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) if (greth->rx_cur == GRETH_RXBD_NUM_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) status |= GRETH_BD_WR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) greth_write_bd(&bdp->stat, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) spin_lock_irqsave(&greth->devlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) greth_enable_rx(greth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) spin_unlock_irqrestore(&greth->devlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) greth->rx_cur = NEXT_RX(greth->rx_cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) static int greth_poll(struct napi_struct *napi, int budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) struct greth_private *greth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) int work_done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) u32 mask, ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) greth = container_of(napi, struct greth_private, napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) restart_txrx_poll:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) if (greth->gbit_mac) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) greth_clean_tx_gbit(greth->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) work_done += greth_rx_gbit(greth->netdev, budget - work_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) if (netif_queue_stopped(greth->netdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) greth_clean_tx(greth->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) work_done += greth_rx(greth->netdev, budget - work_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) if (work_done < budget) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) spin_lock_irqsave(&greth->devlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) ctrl = GRETH_REGLOAD(greth->regs->control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) if ((greth->gbit_mac && (greth->tx_last != greth->tx_next)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) (!greth->gbit_mac && netif_queue_stopped(greth->netdev))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) GRETH_REGSAVE(greth->regs->control,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) ctrl | GRETH_TXI | GRETH_RXI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) mask = GRETH_INT_RX | GRETH_INT_RE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) GRETH_INT_TX | GRETH_INT_TE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_RXI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) mask = GRETH_INT_RX | GRETH_INT_RE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) if (GRETH_REGLOAD(greth->regs->status) & mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) GRETH_REGSAVE(greth->regs->control, ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) spin_unlock_irqrestore(&greth->devlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) goto restart_txrx_poll;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) napi_complete_done(napi, work_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) spin_unlock_irqrestore(&greth->devlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) return work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) static int greth_set_mac_add(struct net_device *dev, void *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) struct sockaddr *addr = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) struct greth_private *greth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) struct greth_regs *regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) greth = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) regs = greth->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) if (!is_valid_ether_addr(addr->sa_data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) return -EADDRNOTAVAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) GRETH_REGSAVE(regs->esa_msb, dev->dev_addr[0] << 8 | dev->dev_addr[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) GRETH_REGSAVE(regs->esa_lsb, dev->dev_addr[2] << 24 | dev->dev_addr[3] << 16 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) dev->dev_addr[4] << 8 | dev->dev_addr[5]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) static u32 greth_hash_get_index(__u8 *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) return (ether_crc(6, addr)) & 0x3F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) static void greth_set_hash_filter(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) struct netdev_hw_addr *ha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) struct greth_private *greth = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) struct greth_regs *regs = greth->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) u32 mc_filter[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) unsigned int bitnr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) mc_filter[0] = mc_filter[1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) netdev_for_each_mc_addr(ha, dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) bitnr = greth_hash_get_index(ha->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) GRETH_REGSAVE(regs->hash_msb, mc_filter[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) GRETH_REGSAVE(regs->hash_lsb, mc_filter[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) static void greth_set_multicast_list(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) int cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) struct greth_private *greth = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) struct greth_regs *regs = greth->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) cfg = GRETH_REGLOAD(regs->control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) if (dev->flags & IFF_PROMISC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) cfg |= GRETH_CTRL_PR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) cfg &= ~GRETH_CTRL_PR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) if (greth->multicast) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) if (dev->flags & IFF_ALLMULTI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) GRETH_REGSAVE(regs->hash_msb, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) GRETH_REGSAVE(regs->hash_lsb, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) cfg |= GRETH_CTRL_MCEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) GRETH_REGSAVE(regs->control, cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) if (netdev_mc_empty(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) cfg &= ~GRETH_CTRL_MCEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) GRETH_REGSAVE(regs->control, cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) /* Setup multicast filter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) greth_set_hash_filter(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) cfg |= GRETH_CTRL_MCEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) GRETH_REGSAVE(regs->control, cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) static u32 greth_get_msglevel(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) struct greth_private *greth = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) return greth->msg_enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) static void greth_set_msglevel(struct net_device *dev, u32 value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) struct greth_private *greth = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) greth->msg_enable = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) static int greth_get_regs_len(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) return sizeof(struct greth_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) static void greth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) struct greth_private *greth = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) strlcpy(info->driver, dev_driver_string(greth->dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) sizeof(info->driver));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) strlcpy(info->bus_info, greth->dev->bus->name, sizeof(info->bus_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) static void greth_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) struct greth_private *greth = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) u32 __iomem *greth_regs = (u32 __iomem *) greth->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) u32 *buff = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) for (i = 0; i < sizeof(struct greth_regs) / sizeof(u32); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) buff[i] = greth_read_bd(&greth_regs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) static const struct ethtool_ops greth_ethtool_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) .get_msglevel = greth_get_msglevel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) .set_msglevel = greth_set_msglevel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) .get_drvinfo = greth_get_drvinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) .get_regs_len = greth_get_regs_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) .get_regs = greth_get_regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) .get_link = ethtool_op_get_link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) .get_link_ksettings = phy_ethtool_get_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) .set_link_ksettings = phy_ethtool_set_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) static struct net_device_ops greth_netdev_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) .ndo_open = greth_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) .ndo_stop = greth_close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) .ndo_start_xmit = greth_start_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) .ndo_set_mac_address = greth_set_mac_add,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) .ndo_validate_addr = eth_validate_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) static inline int wait_for_mdio(struct greth_private *greth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) unsigned long timeout = jiffies + 4*HZ/100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) while (GRETH_REGLOAD(greth->regs->mdio) & GRETH_MII_BUSY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) if (time_after(jiffies, timeout))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) static int greth_mdio_read(struct mii_bus *bus, int phy, int reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) struct greth_private *greth = bus->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) int data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) if (!wait_for_mdio(greth))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) GRETH_REGSAVE(greth->regs->mdio, ((phy & 0x1F) << 11) | ((reg & 0x1F) << 6) | 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) if (!wait_for_mdio(greth))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) if (!(GRETH_REGLOAD(greth->regs->mdio) & GRETH_MII_NVALID)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) data = (GRETH_REGLOAD(greth->regs->mdio) >> 16) & 0xFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) return data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) static int greth_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) struct greth_private *greth = bus->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) if (!wait_for_mdio(greth))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) GRETH_REGSAVE(greth->regs->mdio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) ((val & 0xFFFF) << 16) | ((phy & 0x1F) << 11) | ((reg & 0x1F) << 6) | 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) if (!wait_for_mdio(greth))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) static void greth_link_change(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) struct greth_private *greth = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) struct phy_device *phydev = dev->phydev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) int status_change = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) u32 ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) spin_lock_irqsave(&greth->devlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) if (phydev->link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) if ((greth->speed != phydev->speed) || (greth->duplex != phydev->duplex)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) ctrl = GRETH_REGLOAD(greth->regs->control) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) ~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) if (phydev->duplex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) ctrl |= GRETH_CTRL_FD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) if (phydev->speed == SPEED_100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) ctrl |= GRETH_CTRL_SP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) else if (phydev->speed == SPEED_1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) ctrl |= GRETH_CTRL_GB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) GRETH_REGSAVE(greth->regs->control, ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) greth->speed = phydev->speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) greth->duplex = phydev->duplex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) status_change = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) if (phydev->link != greth->link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) if (!phydev->link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) greth->speed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) greth->duplex = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) greth->link = phydev->link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) status_change = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) spin_unlock_irqrestore(&greth->devlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) if (status_change) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) if (phydev->link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) pr_debug("%s: link up (%d/%s)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) dev->name, phydev->speed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) pr_debug("%s: link down\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) static int greth_mdio_probe(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) struct greth_private *greth = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) struct phy_device *phy = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) /* Find the first PHY */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) phy = phy_find_first(greth->mdio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) if (!phy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) if (netif_msg_probe(greth))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) dev_err(&dev->dev, "no PHY found\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) ret = phy_connect_direct(dev, phy, &greth_link_change,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) greth->gbit_mac ? PHY_INTERFACE_MODE_GMII : PHY_INTERFACE_MODE_MII);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) if (netif_msg_ifup(greth))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) dev_err(&dev->dev, "could not attach to PHY\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) if (greth->gbit_mac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) phy_set_max_speed(phy, SPEED_1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) phy_set_max_speed(phy, SPEED_100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) linkmode_copy(phy->advertising, phy->supported);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) greth->link = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) greth->speed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) greth->duplex = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) static int greth_mdio_init(struct greth_private *greth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) unsigned long timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) struct net_device *ndev = greth->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) greth->mdio = mdiobus_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) if (!greth->mdio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) greth->mdio->name = "greth-mdio";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) snprintf(greth->mdio->id, MII_BUS_ID_SIZE, "%s-%d", greth->mdio->name, greth->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) greth->mdio->read = greth_mdio_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) greth->mdio->write = greth_mdio_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) greth->mdio->priv = greth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) ret = mdiobus_register(greth->mdio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) ret = greth_mdio_probe(greth->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) if (netif_msg_probe(greth))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) dev_err(&greth->netdev->dev, "failed to probe MDIO bus\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) goto unreg_mdio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) phy_start(ndev->phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) /* If Ethernet debug link is used make autoneg happen right away */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) if (greth->edcl && greth_edcl == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) phy_start_aneg(ndev->phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) timeout = jiffies + 6*HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) while (!phy_aneg_done(ndev->phydev) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) time_before(jiffies, timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) phy_read_status(ndev->phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) greth_link_change(greth->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) unreg_mdio:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) mdiobus_unregister(greth->mdio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) mdiobus_free(greth->mdio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) /* Initialize the GRETH MAC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) static int greth_of_probe(struct platform_device *ofdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) struct greth_private *greth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) struct greth_regs *regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) int tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) unsigned long timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) dev = alloc_etherdev(sizeof(struct greth_private));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) if (dev == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) greth = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) greth->netdev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) greth->dev = &ofdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) if (greth_debug > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) greth->msg_enable = greth_debug;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) greth->msg_enable = GRETH_DEF_MSG_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) spin_lock_init(&greth->devlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) greth->regs = of_ioremap(&ofdev->resource[0], 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) resource_size(&ofdev->resource[0]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) "grlib-greth regs");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) if (greth->regs == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) if (netif_msg_probe(greth))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) dev_err(greth->dev, "ioremap failure.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) goto error1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) regs = greth->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) greth->irq = ofdev->archdata.irqs[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) dev_set_drvdata(greth->dev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) SET_NETDEV_DEV(dev, greth->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) if (netif_msg_probe(greth))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) dev_dbg(greth->dev, "resetting controller.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) /* Reset the controller. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) GRETH_REGSAVE(regs->control, GRETH_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) /* Wait for MAC to reset itself */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) timeout = jiffies + HZ/100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) while (GRETH_REGLOAD(regs->control) & GRETH_RESET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) if (time_after(jiffies, timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) if (netif_msg_probe(greth))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) dev_err(greth->dev, "timeout when waiting for reset.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) goto error2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) /* Get default PHY address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) greth->phyaddr = (GRETH_REGLOAD(regs->mdio) >> 11) & 0x1F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) /* Check if we have GBIT capable MAC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) tmp = GRETH_REGLOAD(regs->control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) greth->gbit_mac = (tmp >> 27) & 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) /* Check for multicast capability */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) greth->multicast = (tmp >> 25) & 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) greth->edcl = (tmp >> 31) & 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) /* If we have EDCL we disable the EDCL speed-duplex FSM so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) * it doesn't interfere with the software */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) if (greth->edcl != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) GRETH_REGORIN(regs->control, GRETH_CTRL_DISDUPLEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) /* Check if MAC can handle MDIO interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) greth->mdio_int_en = (tmp >> 26) & 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) err = greth_mdio_init(greth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) if (netif_msg_probe(greth))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) dev_err(greth->dev, "failed to register MDIO bus\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) goto error2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) /* Allocate TX descriptor ring in coherent memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) greth->tx_bd_base = dma_alloc_coherent(greth->dev, 1024,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) &greth->tx_bd_base_phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) if (!greth->tx_bd_base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) goto error3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) /* Allocate RX descriptor ring in coherent memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) greth->rx_bd_base = dma_alloc_coherent(greth->dev, 1024,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) &greth->rx_bd_base_phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) if (!greth->rx_bd_base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) goto error4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) /* Get MAC address from: module param, OF property or ID prom */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) for (i = 0; i < 6; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) if (macaddr[i] != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) if (i == 6) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) const u8 *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) addr = of_get_mac_address(ofdev->dev.of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) if (!IS_ERR(addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) for (i = 0; i < 6; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) macaddr[i] = (unsigned int) addr[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) #ifdef CONFIG_SPARC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) for (i = 0; i < 6; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) macaddr[i] = (unsigned int) idprom->id_ethaddr[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) for (i = 0; i < 6; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) dev->dev_addr[i] = macaddr[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) macaddr[5]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) if (!is_valid_ether_addr(&dev->dev_addr[0])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) if (netif_msg_probe(greth))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) dev_err(greth->dev, "no valid ethernet address, aborting.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) goto error5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) GRETH_REGSAVE(regs->esa_msb, dev->dev_addr[0] << 8 | dev->dev_addr[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) GRETH_REGSAVE(regs->esa_lsb, dev->dev_addr[2] << 24 | dev->dev_addr[3] << 16 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) dev->dev_addr[4] << 8 | dev->dev_addr[5]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) /* Clear all pending interrupts except PHY irq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) GRETH_REGSAVE(regs->status, 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) if (greth->gbit_mac) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) NETIF_F_RXCSUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) dev->features = dev->hw_features | NETIF_F_HIGHDMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) greth_netdev_ops.ndo_start_xmit = greth_start_xmit_gbit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) if (greth->multicast) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) greth_netdev_ops.ndo_set_rx_mode = greth_set_multicast_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) dev->flags |= IFF_MULTICAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) dev->flags &= ~IFF_MULTICAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) dev->netdev_ops = &greth_netdev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) dev->ethtool_ops = &greth_ethtool_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) err = register_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) if (netif_msg_probe(greth))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) dev_err(greth->dev, "netdevice registration failed.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) goto error5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) /* setup NAPI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) netif_napi_add(dev, &greth->napi, greth_poll, 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) error5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) dma_free_coherent(greth->dev, 1024, greth->rx_bd_base, greth->rx_bd_base_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) error4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) dma_free_coherent(greth->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) error3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) mdiobus_unregister(greth->mdio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) error2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) of_iounmap(&ofdev->resource[0], greth->regs, resource_size(&ofdev->resource[0]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) error1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) free_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) static int greth_of_remove(struct platform_device *of_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) struct net_device *ndev = platform_get_drvdata(of_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) struct greth_private *greth = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) /* Free descriptor areas */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) dma_free_coherent(&of_dev->dev, 1024, greth->rx_bd_base, greth->rx_bd_base_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) dma_free_coherent(&of_dev->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) if (ndev->phydev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) phy_stop(ndev->phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) mdiobus_unregister(greth->mdio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) unregister_netdev(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) of_iounmap(&of_dev->resource[0], greth->regs, resource_size(&of_dev->resource[0]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) free_netdev(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) static const struct of_device_id greth_of_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) .name = "GAISLER_ETHMAC",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) .name = "01_01d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) {},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) MODULE_DEVICE_TABLE(of, greth_of_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) static struct platform_driver greth_of_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) .name = "grlib-greth",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) .of_match_table = greth_of_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) .probe = greth_of_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) .remove = greth_of_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) module_platform_driver(greth_of_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) MODULE_AUTHOR("Aeroflex Gaisler AB.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) MODULE_DESCRIPTION("Aeroflex Gaisler Ethernet MAC driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) MODULE_LICENSE("GPL");