^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Dave DNET Ethernet Controller driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (C) 2008 Dave S.r.l. <www.dave.eu>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Copyright (C) 2009 Ilya Yanok, Emcraft Systems Ltd, <yanok@emcraft.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/moduleparam.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/netdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/etherdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/phy.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include "dnet.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #undef DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25)
/* function for reading an internal MAC register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) static u16 dnet_readw_mac(struct dnet *bp, u16 reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) u16 data_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) /* issue a read */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) dnet_writel(bp, reg, MACREG_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) /* since a read/write op to the MAC is very slow,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) * we must wait before reading the data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) ndelay(500);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37)
	/* read back the data from the MAC register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) data_read = dnet_readl(bp, MACREG_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) /* all done */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) return data_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44)
/* function for writing an internal MAC register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) static void dnet_writew_mac(struct dnet *bp, u16 reg, u16 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) /* load data to write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) dnet_writel(bp, val, MACREG_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) /* issue a write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) dnet_writel(bp, reg | DNET_INTERNAL_WRITE, MACREG_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) /* since a read/write op to the MAC is very slow,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) * we must wait before exiting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) ndelay(500);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58)
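/* Program dev->dev_addr into the internal MAC address registers,
 * 16 bits at a time, in big-endian (network) byte order.
 */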
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) static void __dnet_set_hwaddr(struct dnet *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) u16 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) tmp = be16_to_cpup((__be16 *)bp->dev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70)
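/* Read the MAC address back from the internal MAC address registers
 * and, if it is a valid Ethernet address, adopt it for the interface.
 */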
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) static void dnet_get_hwaddr(struct dnet *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) u16 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) u8 addr[6];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75)
	/*
	 * from MAC docs:
	 * "Note that the MAC address is stored in the registers in Hexadecimal
	 * form. For example, to set the MAC Address to: AC-DE-48-00-00-80
	 * would require writing 0xAC (octet 0) to address 0x0B (high byte of
	 * Mac_addr[15:0]), 0xDE (octet 1) to address 0x0A (Low byte of
	 * Mac_addr[15:0]), 0x48 (octet 2) to address 0x0D (high byte of
	 * Mac_addr[31:16]), 0x00 (octet 3) to address 0x0C (Low byte of
	 * Mac_addr[31:16]), 0x00 (octet 4) to address 0x0F (high byte of
	 * Mac_addr[47:32]), and 0x80 (octet 5) to address 0x0E (Low byte of
	 * Mac_addr[47:32])."
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) *((__be16 *)addr) = cpu_to_be16(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) *((__be16 *)(addr + 2)) = cpu_to_be16(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) *((__be16 *)(addr + 4)) = cpu_to_be16(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) if (is_valid_ether_addr(addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) memcpy(bp->dev->dev_addr, addr, sizeof(addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98)
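/* MDIO read through the internal GMII management interface: wait for
 * any pending command to finish, issue the read command and return
 * the data word latched by the MAC.
 */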
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) static int dnet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) struct dnet *bp = bus->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) u16 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) & DNET_INTERNAL_GMII_MNG_CMD_FIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) /* only 5 bits allowed for phy-addr and reg_offset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) mii_id &= 0x1f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) regnum &= 0x1f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) /* prepare reg_value for a read */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) value = (mii_id << 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) value |= regnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) /* write control word */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) /* wait for end of transfer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) & DNET_INTERNAL_GMII_MNG_CMD_FIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) value = dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) pr_debug("mdio_read %02x:%02x <- %04x\n", mii_id, regnum, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) return value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130)
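/* MDIO write through the internal GMII management interface: the data
 * word is loaded first, then the control word (with the 1 << 13 write
 * flag) starts the transfer.
 */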
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) static int dnet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) u16 value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) struct dnet *bp = bus->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) u16 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) pr_debug("mdio_write %02x:%02x <- %04x\n", mii_id, regnum, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) & DNET_INTERNAL_GMII_MNG_CMD_FIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) /* prepare for a write operation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) tmp = (1 << 13);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) /* only 5 bits allowed for phy-addr and reg_offset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) mii_id &= 0x1f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) regnum &= 0x1f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149)
	/* only 16 bits of data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) value &= 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) /* prepare reg_value for a write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) tmp |= (mii_id << 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) tmp |= regnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) /* write data to write first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) /* write control word */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) & DNET_INTERNAL_GMII_MNG_CMD_FIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169)
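/* phylib link-change callback: mirror the PHY's duplex, speed and link
 * state into the MAC MODE and RXTX_CONTROL registers and log any
 * status change.
 */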
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) static void dnet_handle_link_change(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) struct dnet *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) struct phy_device *phydev = dev->phydev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) u32 mode_reg, ctl_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) int status_change = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) spin_lock_irqsave(&bp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) mode_reg = dnet_readw_mac(bp, DNET_INTERNAL_MODE_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) ctl_reg = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) if (phydev->link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) if (bp->duplex != phydev->duplex) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) if (phydev->duplex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) ctl_reg &=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) ~(DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) ctl_reg |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) bp->duplex = phydev->duplex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) status_change = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) if (bp->speed != phydev->speed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) status_change = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) switch (phydev->speed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) case 1000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) mode_reg |= DNET_INTERNAL_MODE_GBITEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) case 100:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) case 10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) mode_reg &= ~DNET_INTERNAL_MODE_GBITEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) printk(KERN_WARNING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) "%s: Ack! Speed (%d) is not "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) "10/100/1000!\n", dev->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) phydev->speed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) bp->speed = phydev->speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) if (phydev->link != bp->link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) if (phydev->link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) mode_reg |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) (DNET_INTERNAL_MODE_RXEN | DNET_INTERNAL_MODE_TXEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) mode_reg &=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) ~(DNET_INTERNAL_MODE_RXEN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) DNET_INTERNAL_MODE_TXEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) bp->speed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) bp->duplex = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) bp->link = phydev->link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) status_change = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) if (status_change) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, ctl_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, mode_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) spin_unlock_irqrestore(&bp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) if (status_change) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) if (phydev->link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) printk(KERN_INFO "%s: link up (%d/%s)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) dev->name, phydev->speed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) printk(KERN_INFO "%s: link down\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250)
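/* Find the first PHY on the MDIO bus and connect it to the net_device,
 * capping the advertised speed at what the MAC supports.
 */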
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) static int dnet_mii_probe(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) struct dnet *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) struct phy_device *phydev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) /* find the first phy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) phydev = phy_find_first(bp->mii_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) if (!phydev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) printk(KERN_ERR "%s: no PHY found\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) /* TODO : add pin_irq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) /* attach the mac to the phy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) if (bp->capabilities & DNET_HAS_RMII) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) phydev = phy_connect(dev, phydev_name(phydev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) &dnet_handle_link_change,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) PHY_INTERFACE_MODE_RMII);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) phydev = phy_connect(dev, phydev_name(phydev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) &dnet_handle_link_change,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) PHY_INTERFACE_MODE_MII);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) if (IS_ERR(phydev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) return PTR_ERR(phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) /* mask with MAC supported features */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) if (bp->capabilities & DNET_HAS_GIGABIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) phy_set_max_speed(phydev, SPEED_1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) phy_set_max_speed(phydev, SPEED_100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) phy_support_asym_pause(phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) bp->link = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) bp->speed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) bp->duplex = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296)
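/* Allocate and register the MDIO bus that fronts the internal GMII
 * management interface, then probe for the PHY.
 */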
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) static int dnet_mii_init(struct dnet *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) bp->mii_bus = mdiobus_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) if (bp->mii_bus == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) bp->mii_bus->name = "dnet_mii_bus";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) bp->mii_bus->read = &dnet_mdio_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) bp->mii_bus->write = &dnet_mdio_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) bp->pdev->name, bp->pdev->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) bp->mii_bus->priv = bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) if (mdiobus_register(bp->mii_bus)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) err = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) if (dnet_mii_probe(bp->dev) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) err = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) goto err_out_unregister_bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) err_out_unregister_bus:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) mdiobus_unregister(bp->mii_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) mdiobus_free(bp->mii_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) /* For Neptune board: LINK1000 as Link LED and TX as activity LED */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) static int dnet_phy_marvell_fixup(struct phy_device *phydev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) return phy_write(phydev, 0x18, 0x4148);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338)
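/* Accumulate the hardware statistics counters into bp->hw_stats. The
 * counter registers are contiguous and laid out in the same order as
 * the fields of struct dnet_stats, which the WARN_ON()s below verify.
 */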
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) static void dnet_update_stats(struct dnet *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) u32 __iomem *reg = bp->regs + DNET_RX_PKT_IGNR_CNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) u32 *p = &bp->hw_stats.rx_pkt_ignr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) u32 *end = &bp->hw_stats.rx_byte + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) WARN_ON((unsigned long)(end - p - 1) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) (DNET_RX_BYTE_CNT - DNET_RX_PKT_IGNR_CNT) / 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) for (; p < end; p++, reg++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) *p += readl(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) reg = bp->regs + DNET_TX_UNICAST_CNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) p = &bp->hw_stats.tx_unicast;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) end = &bp->hw_stats.tx_byte + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) WARN_ON((unsigned long)(end - p - 1) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) (DNET_TX_BYTE_CNT - DNET_TX_UNICAST_CNT) / 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) for (; p < end; p++, reg++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) *p += readl(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361)
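/* NAPI poll: pop frames from the RX command/data FIFOs until the
 * budget is exhausted or the FIFO is empty, push them up the stack,
 * then re-enable the RX interrupt once all pending frames are done.
 */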
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) static int dnet_poll(struct napi_struct *napi, int budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) struct dnet *bp = container_of(napi, struct dnet, napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) struct net_device *dev = bp->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) int npackets = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) unsigned int pkt_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) unsigned int *data_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) u32 int_enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) u32 cmd_word;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) while (npackets < budget) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) * break out of while loop if there are no more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) * packets waiting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) cmd_word = dnet_readl(bp, RX_LEN_FIFO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) pkt_len = cmd_word & 0xFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) if (cmd_word & 0xDF180000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) printk(KERN_ERR "%s packet receive error %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) __func__, cmd_word);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) skb = netdev_alloc_skb(dev, pkt_len + 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) if (skb != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) /* Align IP on 16 byte boundaries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) skb_reserve(skb, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) * 'skb_put()' points to the start of sk_buff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) * data area.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) data_ptr = skb_put(skb, pkt_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) for (i = 0; i < (pkt_len + 3) >> 2; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) *data_ptr++ = dnet_readl(bp, RX_DATA_FIFO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) skb->protocol = eth_type_trans(skb, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) netif_receive_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) npackets++;
		} else {
			printk(KERN_NOTICE
			       "%s: No memory to allocate a sk_buff of "
			       "size %u.\n", dev->name, pkt_len);
		}
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) if (npackets < budget) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) /* We processed all packets available. Tell NAPI it can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) * stop polling then re-enable rx interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) napi_complete_done(napi, npackets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) int_enable = dnet_readl(bp, INTR_ENB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) dnet_writel(bp, int_enable, INTR_ENB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) return npackets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421)
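/* Interrupt handler: wake the TX queue when the TX FIFO drains, flush
 * the FIFOs on RX/TX FIFO errors, and hand received frames off to
 * NAPI after masking further RX interrupts.
 */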
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) static irqreturn_t dnet_interrupt(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) struct net_device *dev = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) struct dnet *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) u32 int_src, int_enable, int_current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) unsigned int handled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) spin_lock_irqsave(&bp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) /* read and clear the DNET irq (clear on read) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) int_src = dnet_readl(bp, INTR_SRC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) int_enable = dnet_readl(bp, INTR_ENB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) int_current = int_src & int_enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) /* restart the queue if we had stopped it for TX fifo almost full */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) if (int_current & DNET_INTR_SRC_TX_FIFOAE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) int_enable = dnet_readl(bp, INTR_ENB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) int_enable &= ~DNET_INTR_ENB_TX_FIFOAE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) dnet_writel(bp, int_enable, INTR_ENB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) handled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) /* RX FIFO error checking */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) if (int_current &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) (DNET_INTR_SRC_RX_CMDFIFOFF | DNET_INTR_SRC_RX_DATAFIFOFF)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) printk(KERN_ERR "%s: RX fifo error %x, irq %x\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) dnet_readl(bp, RX_STATUS), int_current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) /* we can only flush the RX FIFOs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH, SYS_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) ndelay(500);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) dnet_writel(bp, 0, SYS_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) handled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) /* TX FIFO error checking */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) if (int_current &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) (DNET_INTR_SRC_TX_FIFOFULL | DNET_INTR_SRC_TX_DISCFRM)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) printk(KERN_ERR "%s: TX fifo error %x, irq %x\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) dnet_readl(bp, TX_STATUS), int_current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) /* we can only flush the TX FIFOs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) dnet_writel(bp, DNET_SYS_CTL_TXFIFOFLUSH, SYS_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) ndelay(500);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) dnet_writel(bp, 0, SYS_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) handled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) if (int_current & DNET_INTR_SRC_RX_CMDFIFOAF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) if (napi_schedule_prep(&bp->napi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) * There's no point taking any more interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) * until we have processed the buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) /* Disable Rx interrupts and schedule NAPI poll */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) int_enable = dnet_readl(bp, INTR_ENB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) int_enable &= ~DNET_INTR_SRC_RX_CMDFIFOAF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) dnet_writel(bp, int_enable, INTR_ENB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) __napi_schedule(&bp->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) handled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) if (!handled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) pr_debug("%s: irq %x remains\n", __func__, int_current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) spin_unlock_irqrestore(&bp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) return IRQ_RETVAL(handled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) #ifdef DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) static inline void dnet_print_skb(struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) int k;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) printk(KERN_DEBUG PFX "data:");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) for (k = 0; k < skb->len; k++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) printk(" %02x", (unsigned int)skb->data[k]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) printk("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) #define dnet_print_skb(skb) do {} while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505)
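/* Transmit path: the frame is copied word by word into the TX data
 * FIFO (no DMA), then the length/command word is written to start
 * transmission. The queue is stopped when the FIFO is almost full.
 */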
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) static netdev_tx_t dnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) struct dnet *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) unsigned int i, tx_cmd, wrsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) unsigned int *bufp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) u32 irq_enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) dnet_readl(bp, TX_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) pr_debug("start_xmit: len %u head %p data %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) skb->len, skb->head, skb->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) dnet_print_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) spin_lock_irqsave(&bp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) dnet_readl(bp, TX_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) bufp = (unsigned int *)(((unsigned long) skb->data) & ~0x3UL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) wrsz = (u32) skb->len + 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) wrsz += ((unsigned long) skb->data) & 0x3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) wrsz >>= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) tx_cmd = ((((unsigned long)(skb->data)) & 0x03) << 16) | (u32) skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) /* check if there is enough room for the current frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) if (wrsz < (DNET_FIFO_SIZE - dnet_readl(bp, TX_FIFO_WCNT))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) for (i = 0; i < wrsz; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) dnet_writel(bp, *bufp++, TX_DATA_FIFO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) * inform MAC that a packet's written and ready to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) * shipped out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) dnet_writel(bp, tx_cmd, TX_LEN_FIFO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) if (dnet_readl(bp, TX_FIFO_WCNT) > DNET_FIFO_TX_DATA_AF_TH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) dnet_readl(bp, INTR_SRC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) irq_enable = dnet_readl(bp, INTR_ENB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) irq_enable |= DNET_INTR_ENB_TX_FIFOAE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) dnet_writel(bp, irq_enable, INTR_ENB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) skb_tx_timestamp(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) /* free the buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) spin_unlock_irqrestore(&bp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560)
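/* Put the MAC into a quiescent state: disable RX/TX, reprogram the
 * FIFO thresholds and flush both FIFOs.
 */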
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) static void dnet_reset_hw(struct dnet *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) /* put ts_mac in IDLE state i.e. disable rx/tx */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, DNET_INTERNAL_MODE_FCEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) * RX FIFO almost full threshold: only cmd FIFO almost full is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) * implemented for RX side
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) dnet_writel(bp, DNET_FIFO_RX_CMD_AF_TH, RX_FIFO_TH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) * TX FIFO almost empty threshold: only data FIFO almost empty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) * is implemented for TX side
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) dnet_writel(bp, DNET_FIFO_TX_DATA_AE_TH, TX_FIFO_TH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) /* flush rx/tx fifos */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH | DNET_SYS_CTL_TXFIFOFLUSH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) SYS_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) msleep(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) dnet_writel(bp, 0, SYS_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583)
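/* Bring the MAC up: reset it, program the station address and the
 * RX/TX control options, then enable the interrupt sources we use.
 */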
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) static void dnet_init_hw(struct dnet *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) u32 config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) dnet_reset_hw(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) __dnet_set_hwaddr(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) config = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) if (bp->dev->flags & IFF_PROMISC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) /* Copy All Frames */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) config |= DNET_INTERNAL_RXTX_CONTROL_ENPROMISC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) if (!(bp->dev->flags & IFF_BROADCAST))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) /* No BroadCast */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) config |= DNET_INTERNAL_RXTX_CONTROL_RXMULTICAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) config |= DNET_INTERNAL_RXTX_CONTROL_RXPAUSE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) DNET_INTERNAL_RXTX_CONTROL_RXBROADCAST |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) DNET_INTERNAL_RXTX_CONTROL_DROPCONTROL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) DNET_INTERNAL_RXTX_CONTROL_DISCFXFCS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) /* clear irq before enabling them */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) config = dnet_readl(bp, INTR_SRC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) /* enable RX/TX interrupt, recv packet ready interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) dnet_writel(bp, DNET_INTR_ENB_GLOBAL_ENABLE | DNET_INTR_ENB_RX_SUMMARY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) DNET_INTR_ENB_TX_SUMMARY | DNET_INTR_ENB_RX_FIFOERR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) DNET_INTR_ENB_RX_ERROR | DNET_INTR_ENB_RX_FIFOFULL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) DNET_INTR_ENB_TX_FIFOFULL | DNET_INTR_ENB_TX_DISCFRM |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) DNET_INTR_ENB_RX_PKTRDY, INTR_ENB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) static int dnet_open(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) struct dnet *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621)
	/* if the PHY is not yet registered, retry later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) if (!dev->phydev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) napi_enable(&bp->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) dnet_init_hw(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) phy_start_aneg(dev->phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) /* schedule a link state check */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) phy_start(dev->phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) netif_start_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) static int dnet_close(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) struct dnet *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) napi_disable(&bp->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) if (dev->phydev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) phy_stop(dev->phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) dnet_reset_hw(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) netif_carrier_off(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) static inline void dnet_print_pretty_hwstats(struct dnet_stats *hwstat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) pr_debug("%s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) pr_debug("----------------------------- RX statistics "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) "-------------------------------\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) pr_debug("RX_PKT_IGNR_CNT %-8x\n", hwstat->rx_pkt_ignr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) pr_debug("RX_LEN_CHK_ERR_CNT %-8x\n", hwstat->rx_len_chk_err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) pr_debug("RX_LNG_FRM_CNT %-8x\n", hwstat->rx_lng_frm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) pr_debug("RX_SHRT_FRM_CNT %-8x\n", hwstat->rx_shrt_frm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) pr_debug("RX_IPG_VIOL_CNT %-8x\n", hwstat->rx_ipg_viol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) pr_debug("RX_CRC_ERR_CNT %-8x\n", hwstat->rx_crc_err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) pr_debug("RX_OK_PKT_CNT %-8x\n", hwstat->rx_ok_pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) pr_debug("RX_CTL_FRM_CNT %-8x\n", hwstat->rx_ctl_frm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) pr_debug("RX_PAUSE_FRM_CNT %-8x\n", hwstat->rx_pause_frm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) pr_debug("RX_MULTICAST_CNT %-8x\n", hwstat->rx_multicast);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) pr_debug("RX_BROADCAST_CNT %-8x\n", hwstat->rx_broadcast);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) pr_debug("RX_VLAN_TAG_CNT %-8x\n", hwstat->rx_vlan_tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) pr_debug("RX_PRE_SHRINK_CNT %-8x\n", hwstat->rx_pre_shrink);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) pr_debug("RX_DRIB_NIB_CNT %-8x\n", hwstat->rx_drib_nib);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) pr_debug("RX_UNSUP_OPCD_CNT %-8x\n", hwstat->rx_unsup_opcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) pr_debug("RX_BYTE_CNT %-8x\n", hwstat->rx_byte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) pr_debug("----------------------------- TX statistics "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) "-------------------------------\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) pr_debug("TX_UNICAST_CNT %-8x\n", hwstat->tx_unicast);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) pr_debug("TX_PAUSE_FRM_CNT %-8x\n", hwstat->tx_pause_frm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) pr_debug("TX_MULTICAST_CNT %-8x\n", hwstat->tx_multicast);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) pr_debug("TX_BRDCAST_CNT %-8x\n", hwstat->tx_brdcast);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) pr_debug("TX_VLAN_TAG_CNT %-8x\n", hwstat->tx_vlan_tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) pr_debug("TX_BAD_FCS_CNT %-8x\n", hwstat->tx_bad_fcs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) pr_debug("TX_JUMBO_CNT %-8x\n", hwstat->tx_jumbo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) pr_debug("TX_BYTE_CNT %-8x\n", hwstat->tx_byte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687)
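/* ndo_get_stats: fold the hardware counters into the generic
 * net_device_stats fields.
 */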
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) static struct net_device_stats *dnet_get_stats(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) struct dnet *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) struct net_device_stats *nstat = &dev->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) struct dnet_stats *hwstat = &bp->hw_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) /* read stats from hardware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) dnet_update_stats(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) /* Convert HW stats into netdevice stats */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) nstat->rx_errors = (hwstat->rx_len_chk_err +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) hwstat->rx_lng_frm + hwstat->rx_shrt_frm +
			    /* ignore IPG violation error
			    hwstat->rx_ipg_viol + */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) hwstat->rx_crc_err +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) hwstat->rx_pre_shrink +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) hwstat->rx_drib_nib + hwstat->rx_unsup_opcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) nstat->tx_errors = hwstat->tx_bad_fcs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) nstat->rx_length_errors = (hwstat->rx_len_chk_err +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) hwstat->rx_lng_frm +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) hwstat->rx_shrt_frm + hwstat->rx_pre_shrink);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) nstat->rx_crc_errors = hwstat->rx_crc_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) nstat->rx_frame_errors = hwstat->rx_pre_shrink + hwstat->rx_drib_nib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) nstat->rx_packets = hwstat->rx_ok_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) nstat->tx_packets = (hwstat->tx_unicast +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) hwstat->tx_multicast + hwstat->tx_brdcast);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) nstat->rx_bytes = hwstat->rx_byte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) nstat->tx_bytes = hwstat->tx_byte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) nstat->multicast = hwstat->rx_multicast;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) nstat->rx_missed_errors = hwstat->rx_pkt_ignr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) dnet_print_pretty_hwstats(hwstat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) return nstat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) static void dnet_get_drvinfo(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) struct ethtool_drvinfo *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) {
	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
	strscpy(info->bus_info, "0", sizeof(info->bus_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) static const struct ethtool_ops dnet_ethtool_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) .get_drvinfo = dnet_get_drvinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) .get_link = ethtool_op_get_link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) .get_ts_info = ethtool_op_get_ts_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) .get_link_ksettings = phy_ethtool_get_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) .set_link_ksettings = phy_ethtool_set_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) static const struct net_device_ops dnet_netdev_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) .ndo_open = dnet_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) .ndo_stop = dnet_close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) .ndo_get_stats = dnet_get_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) .ndo_start_xmit = dnet_start_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) .ndo_do_ioctl = phy_do_ioctl_running,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) .ndo_set_mac_address = eth_mac_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) .ndo_validate_addr = eth_validate_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749)
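/* Platform probe: map the register window, request the interrupt,
 * read (or randomise) the MAC address, then register the net_device
 * and the MDIO bus.
 */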
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) static int dnet_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) struct dnet *bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) struct phy_device *phydev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) int err;
	int irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758)
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) dev = alloc_etherdev(sizeof(*bp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) /* TODO: Actually, we have some interesting features... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) dev->features |= 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) bp->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) platform_set_drvdata(pdev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) SET_NETDEV_DEV(dev, &pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) spin_lock_init(&bp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) bp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) if (IS_ERR(bp->regs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) err = PTR_ERR(bp->regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) goto err_out_free_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) dev->irq = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) err = request_irq(dev->irq, dnet_interrupt, 0, DRV_NAME, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) irq, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) goto err_out_free_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) dev->netdev_ops = &dnet_netdev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) netif_napi_add(dev, &bp->napi, dnet_poll, 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) dev->ethtool_ops = &dnet_ethtool_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) dev->base_addr = (unsigned long)bp->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) bp->capabilities = dnet_readl(bp, VERCAPS) & DNET_CAPS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) dnet_get_hwaddr(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) if (!is_valid_ether_addr(dev->dev_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) /* choose a random ethernet address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) eth_hw_addr_random(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) __dnet_set_hwaddr(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) err = register_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) goto err_out_free_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) /* register the PHY board fixup (for Marvell 88E1111) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) err = phy_register_fixup_for_uid(0x01410cc0, 0xfffffff0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) dnet_phy_marvell_fixup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) /* we can live without it, so just issue a warning */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) dev_warn(&pdev->dev, "Cannot register PHY board fixup.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) err = dnet_mii_init(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) goto err_out_unregister_netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) dev_info(&pdev->dev, "Dave DNET at 0x%p (0x%08x) irq %d %pM\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) bp->regs, (unsigned int)res->start, dev->irq, dev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) dev_info(&pdev->dev, "has %smdio, %sirq, %sgigabit, %sdma\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) (bp->capabilities & DNET_HAS_MDIO) ? "" : "no ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) (bp->capabilities & DNET_HAS_IRQ) ? "" : "no ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) (bp->capabilities & DNET_HAS_GIGABIT) ? "" : "no ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) (bp->capabilities & DNET_HAS_DMA) ? "" : "no ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) phydev = dev->phydev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) phy_attached_info(phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) err_out_unregister_netdev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) unregister_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) err_out_free_irq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) free_irq(dev->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) err_out_free_dev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) free_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) static int dnet_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) struct dnet *bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) dev = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) if (dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) if (dev->phydev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) phy_disconnect(dev->phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) mdiobus_unregister(bp->mii_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) mdiobus_free(bp->mii_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) unregister_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) free_irq(dev->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) free_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) static struct platform_driver dnet_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) .probe = dnet_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) .remove = dnet_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) .name = "dnet",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) module_platform_driver(dnet_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) MODULE_DESCRIPTION("Dave DNET Ethernet driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) MODULE_AUTHOR("Ilya Yanok <yanok@emcraft.com>, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) "Matteo Vit <matteo.vit@dave.eu>");