// SPDX-License-Identifier: GPL-2.0-only
/* drivers/net/ethernet/8390/ax88796.c
 *
 * Copyright 2005,2007 Simtec Electronics
 *	Ben Dooks <ben@simtec.co.uk>
 *
 * Asix AX88796 10/100 Ethernet controller support
 *	Based on ne.c, by Donald Becker, et al.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/isapnp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mdio-bitbang.h>
#include <linux/phy.h>
#include <linux/eeprom_93cx6.h>
#include <linux/slab.h>

#include <net/ax88796.h>


/* Rename the lib8390.c functions to show that they are in this driver */
#define __ei_open ax_ei_open
#define __ei_close ax_ei_close
#define __ei_poll ax_ei_poll
#define __ei_start_xmit ax_ei_start_xmit
#define __ei_tx_timeout ax_ei_tx_timeout
#define __ei_get_stats ax_ei_get_stats
#define __ei_set_multicast_list ax_ei_set_multicast_list
#define __ei_interrupt ax_ei_interrupt
#define ____alloc_ei_netdev ax__alloc_ei_netdev
#define __NS8390_init ax_NS8390_init

/* force unsigned long back to 'void __iomem *' */
#define ax_convert_addr(_a) ((void __force __iomem *)(_a))

#define ei_inb(_a) readb(ax_convert_addr(_a))
#define ei_outb(_v, _a) writeb(_v, ax_convert_addr(_a))

#define ei_inb_p(_a) ei_inb(_a)
#define ei_outb_p(_v, _a) ei_outb(_v, _a)

/* define EI_SHIFT() to take into account our register offsets */
#define EI_SHIFT(x) (ei_local->reg_offset[(x)])

/* Ensure we have our RCR base value */
#define AX88796_PLATFORM

static unsigned char version[] = "ax88796.c: Copyright 2005,2007 Simtec Electronics\n";

#include "lib8390.c"

#define DRV_NAME "ax88796"
#define DRV_VERSION "1.00"

/* from ne.c */
#define NE_CMD		EI_SHIFT(0x00)
#define NE_RESET	EI_SHIFT(0x1f)
#define NE_DATAPORT	EI_SHIFT(0x10)

#define NE1SM_START_PG	0x20	/* First page of TX buffer */
#define NE1SM_STOP_PG	0x40	/* Last page +1 of RX ring */
#define NESM_START_PG	0x40	/* First page of TX buffer */
#define NESM_STOP_PG	0x80	/* Last page +1 of RX ring */

#define AX_GPOC_PPDSET	BIT(6)

/* device private data */

struct ax_device {
	struct mii_bus *mii_bus;
	struct mdiobb_ctrl bb_ctrl;
	void __iomem *addr_memr;
	u8 reg_memr;
	int link;
	int speed;
	int duplex;

	void __iomem *map2;
	const struct ax_plat_data *plat;

	unsigned char running;
	unsigned char resume_open;
	unsigned int irqflags;

	u32 reg_offsets[0x20];
};

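/*
 * The ax_device private data sits directly after the ei_device structure
 * (see the sizeof(struct ax_device) argument passed to ax__alloc_ei_netdev()
 * in ax_probe()), so it can be reached with simple pointer arithmetic on
 * netdev_priv().
 */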
static inline struct ax_device *to_ax_dev(struct net_device *dev)
{
	struct ei_device *ei_local = netdev_priv(dev);
	return (struct ax_device *)(ei_local + 1);
}

/*
 * ax_initial_check
 *
 * do an initial probe for the card to check whether it exists
 * and is functional
 */
static int ax_initial_check(struct net_device *dev)
{
	struct ei_device *ei_local = netdev_priv(dev);
	void __iomem *ioaddr = ei_local->mem;
	int reg0;
	int regd;

	reg0 = ei_inb(ioaddr);
	if (reg0 == 0xFF)
		return -ENODEV;

	ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP, ioaddr + E8390_CMD);
	regd = ei_inb(ioaddr + 0x0d);
	ei_outb(0xff, ioaddr + 0x0d);
	ei_outb(E8390_NODMA + E8390_PAGE0, ioaddr + E8390_CMD);
	ei_inb(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */
	if (ei_inb(ioaddr + EN0_COUNTER0) != 0) {
		ei_outb(reg0, ioaddr);
		ei_outb(regd, ioaddr + 0x0d);	/* Restore the old values. */
		return -ENODEV;
	}

	return 0;
}

/*
 * Hard reset the card. This used to pause for the same period that an
 * 8390 reset command required, but that shouldn't be necessary.
 */
static void ax_reset_8390(struct net_device *dev)
{
	struct ei_device *ei_local = netdev_priv(dev);
	unsigned long reset_start_time = jiffies;
	void __iomem *addr = (void __iomem *)dev->base_addr;

	netif_dbg(ei_local, hw, dev, "resetting the 8390 t=%ld...\n", jiffies);

	ei_outb(ei_inb(addr + NE_RESET), addr + NE_RESET);

	ei_local->txing = 0;
	ei_local->dmaing = 0;

	/* This check _should_not_ be necessary, omit eventually. */
	while ((ei_inb(addr + EN0_ISR) & ENISR_RESET) == 0) {
		if (time_after(jiffies, reset_start_time + 2 * HZ / 100)) {
			netdev_warn(dev, "%s: did not complete.\n", __func__);
			break;
		}
	}

	ei_outb(ENISR_RESET, addr + EN0_ISR);	/* Ack intr. */
}

/* Wrapper for __ei_interrupt for platforms that have a platform-specific
 * way to find out whether the interrupt request might be caused by
 * the ax88796 chip.
 */
static irqreturn_t ax_ei_interrupt_filtered(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct ax_device *ax = to_ax_dev(dev);
	struct platform_device *pdev = to_platform_device(dev->dev.parent);

	if (!ax->plat->check_irq(pdev))
		return IRQ_NONE;

	return ax_ei_interrupt(irq, dev_id);
}

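/*
 * Grab the 8390 specific header for the packet at ring_page. The header is
 * fetched through the remote DMA data port, so this must not run while
 * another remote DMA transfer is in flight (hence the dmaing check).
 */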
static void ax_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
			    int ring_page)
{
	struct ei_device *ei_local = netdev_priv(dev);
	void __iomem *nic_base = ei_local->mem;

	/* This *shouldn't* happen. If it does, it's the last thing you'll see */
	if (ei_local->dmaing) {
		netdev_err(dev, "DMAing conflict in %s "
			   "[DMAstat:%d][irqlock:%d].\n",
			   __func__,
			   ei_local->dmaing, ei_local->irqlock);
		return;
	}

	ei_local->dmaing |= 0x01;
	ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + NE_CMD);
	ei_outb(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
	ei_outb(0, nic_base + EN0_RCNTHI);
	ei_outb(0, nic_base + EN0_RSARLO);		/* On page boundary */
	ei_outb(ring_page, nic_base + EN0_RSARHI);
	ei_outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);

	if (ei_local->word16)
		ioread16_rep(nic_base + NE_DATAPORT, hdr,
			     sizeof(struct e8390_pkt_hdr) >> 1);
	else
		ioread8_rep(nic_base + NE_DATAPORT, hdr,
			    sizeof(struct e8390_pkt_hdr));

	ei_outb(ENISR_RDC, nic_base + EN0_ISR);	/* Ack intr. */
	ei_local->dmaing &= ~0x01;

	le16_to_cpus(&hdr->count);
}


/*
 * Block input and output, similar to the Crynwr packet driver. If
 * you are porting to a new ethercard, look at the packet driver
 * source for hints. The NEx000 doesn't share the on-board packet
 * memory -- you have to put the packet out through the "remote DMA"
 * dataport using ei_outb.
 */
static void ax_block_input(struct net_device *dev, int count,
			   struct sk_buff *skb, int ring_offset)
{
	struct ei_device *ei_local = netdev_priv(dev);
	void __iomem *nic_base = ei_local->mem;
	char *buf = skb->data;

	if (ei_local->dmaing) {
		netdev_err(dev,
			   "DMAing conflict in %s "
			   "[DMAstat:%d][irqlock:%d].\n",
			   __func__,
			   ei_local->dmaing, ei_local->irqlock);
		return;
	}

	ei_local->dmaing |= 0x01;

	ei_outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base + NE_CMD);
	ei_outb(count & 0xff, nic_base + EN0_RCNTLO);
	ei_outb(count >> 8, nic_base + EN0_RCNTHI);
	ei_outb(ring_offset & 0xff, nic_base + EN0_RSARLO);
	ei_outb(ring_offset >> 8, nic_base + EN0_RSARHI);
	ei_outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);

	if (ei_local->word16) {
		ioread16_rep(nic_base + NE_DATAPORT, buf, count >> 1);
		if (count & 0x01)
			buf[count-1] = ei_inb(nic_base + NE_DATAPORT);

	} else {
		ioread8_rep(nic_base + NE_DATAPORT, buf, count);
	}

	ei_local->dmaing &= ~1;
}

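/*
 * Copy a packet from host memory into the card's transmit buffer at
 * start_page using a remote DMA write, then wait for the remote DMA
 * complete (RDC) interrupt before returning.
 */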
static void ax_block_output(struct net_device *dev, int count,
			    const unsigned char *buf, const int start_page)
{
	struct ei_device *ei_local = netdev_priv(dev);
	void __iomem *nic_base = ei_local->mem;
	unsigned long dma_start;

	/*
	 * Round the count up for word writes. Do we need to do this?
	 * What effect will an odd byte count have on the 8390? I
	 * should check someday.
	 */
	if (ei_local->word16 && (count & 0x01))
		count++;

	/* This *shouldn't* happen. If it does, it's the last thing you'll see */
	if (ei_local->dmaing) {
		netdev_err(dev, "DMAing conflict in %s."
			   "[DMAstat:%d][irqlock:%d]\n",
			   __func__,
			   ei_local->dmaing, ei_local->irqlock);
		return;
	}

	ei_local->dmaing |= 0x01;
	/* We should already be in page 0, but to be safe... */
	ei_outb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD);

	ei_outb(ENISR_RDC, nic_base + EN0_ISR);

	/* Now the normal output. */
	ei_outb(count & 0xff, nic_base + EN0_RCNTLO);
	ei_outb(count >> 8, nic_base + EN0_RCNTHI);
	ei_outb(0x00, nic_base + EN0_RSARLO);
	ei_outb(start_page, nic_base + EN0_RSARHI);

	ei_outb(E8390_RWRITE+E8390_START, nic_base + NE_CMD);
	if (ei_local->word16)
		iowrite16_rep(nic_base + NE_DATAPORT, buf, count >> 1);
	else
		iowrite8_rep(nic_base + NE_DATAPORT, buf, count);

	dma_start = jiffies;

	while ((ei_inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) {
		if (time_after(jiffies, dma_start + 2 * HZ / 100)) { /* 20ms */
			netdev_warn(dev, "timeout waiting for Tx RDC.\n");
			ax_reset_8390(dev);
			ax_NS8390_init(dev, 1);
			break;
		}
	}

	ei_outb(ENISR_RDC, nic_base + EN0_ISR);	/* Ack intr. */
	ei_local->dmaing &= ~0x01;
}

/* definitions for accessing MII/EEPROM interface */

#define AX_MEMR			EI_SHIFT(0x14)
#define AX_MEMR_MDC		BIT(0)
#define AX_MEMR_MDIR		BIT(1)
#define AX_MEMR_MDI		BIT(2)
#define AX_MEMR_MDO		BIT(3)
#define AX_MEMR_EECS		BIT(4)
#define AX_MEMR_EEI		BIT(5)
#define AX_MEMR_EEO		BIT(6)
#define AX_MEMR_EECLK		BIT(7)

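/*
 * PHY link state callback, registered via phy_connect_direct() in
 * ax_mii_probe(). Cache the reported speed/duplex/link values and log the
 * change whenever any of them differ from what we last saw.
 */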
static void ax_handle_link_change(struct net_device *dev)
{
	struct ax_device *ax = to_ax_dev(dev);
	struct phy_device *phy_dev = dev->phydev;
	int status_change = 0;

	if (phy_dev->link && ((ax->speed != phy_dev->speed) ||
			      (ax->duplex != phy_dev->duplex))) {

		ax->speed = phy_dev->speed;
		ax->duplex = phy_dev->duplex;
		status_change = 1;
	}

	if (phy_dev->link != ax->link) {
		if (!phy_dev->link) {
			ax->speed = 0;
			ax->duplex = -1;
		}
		ax->link = phy_dev->link;

		status_change = 1;
	}

	if (status_change)
		phy_print_status(phy_dev);
}

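/*
 * Attach to the first PHY found on the bitbanged MDIO bus, limit it to
 * 100Mbit (the AX88796 is a 10/100 device) and report what was found.
 */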
static int ax_mii_probe(struct net_device *dev)
{
	struct ax_device *ax = to_ax_dev(dev);
	struct phy_device *phy_dev = NULL;
	int ret;

	/* find the first phy */
	phy_dev = phy_find_first(ax->mii_bus);
	if (!phy_dev) {
		netdev_err(dev, "no PHY found\n");
		return -ENODEV;
	}

	ret = phy_connect_direct(dev, phy_dev, ax_handle_link_change,
				 PHY_INTERFACE_MODE_MII);
	if (ret) {
		netdev_err(dev, "Could not attach to PHY\n");
		return ret;
	}

	phy_set_max_speed(phy_dev, SPEED_100);

	netdev_info(dev, "PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
		    phy_dev->drv->name, phydev_name(phy_dev), phy_dev->irq);

	return 0;
}

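/*
 * Power the PHY up or down by toggling the PPDSET bit in the GPOC register
 * (offset 0x17), starting from the platform supplied base value.
 */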
static void ax_phy_switch(struct net_device *dev, int on)
{
	struct ei_device *ei_local = netdev_priv(dev);
	struct ax_device *ax = to_ax_dev(dev);

	u8 reg_gpoc = ax->plat->gpoc_val;

	if (!!on)
		reg_gpoc &= ~AX_GPOC_PPDSET;
	else
		reg_gpoc |= AX_GPOC_PPDSET;

	ei_outb(reg_gpoc, ei_local->mem + EI_SHIFT(0x17));
}

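/*
 * Bit-banged MDIO helpers for the mdio-bitbang framework. The MDC, MDIO
 * direction and MDIO data lines are all driven through bits of the MEMR
 * register, with the last written value cached in ax->reg_memr.
 */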
static void ax_bb_mdc(struct mdiobb_ctrl *ctrl, int level)
{
	struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);

	if (level)
		ax->reg_memr |= AX_MEMR_MDC;
	else
		ax->reg_memr &= ~AX_MEMR_MDC;

	ei_outb(ax->reg_memr, ax->addr_memr);
}

static void ax_bb_dir(struct mdiobb_ctrl *ctrl, int output)
{
	struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);

	if (output)
		ax->reg_memr &= ~AX_MEMR_MDIR;
	else
		ax->reg_memr |= AX_MEMR_MDIR;

	ei_outb(ax->reg_memr, ax->addr_memr);
}

static void ax_bb_set_data(struct mdiobb_ctrl *ctrl, int value)
{
	struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);

	if (value)
		ax->reg_memr |= AX_MEMR_MDO;
	else
		ax->reg_memr &= ~AX_MEMR_MDO;

	ei_outb(ax->reg_memr, ax->addr_memr);
}

static int ax_bb_get_data(struct mdiobb_ctrl *ctrl)
{
	struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
	int reg_memr = ei_inb(ax->addr_memr);

	return reg_memr & AX_MEMR_MDI ? 1 : 0;
}

static const struct mdiobb_ops bb_ops = {
	.owner = THIS_MODULE,
	.set_mdc = ax_bb_mdc,
	.set_mdio_dir = ax_bb_dir,
	.set_mdio_data = ax_bb_set_data,
	.get_mdio_data = ax_bb_get_data,
};

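/*
 * Create and register the bitbanged MDIO bus used to talk to the PHY.
 * Called from ax_open(); the bus is torn down again in ax_close() and on
 * the ax_open() error paths.
 */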
static int ax_mii_init(struct net_device *dev)
{
	struct platform_device *pdev = to_platform_device(dev->dev.parent);
	struct ei_device *ei_local = netdev_priv(dev);
	struct ax_device *ax = to_ax_dev(dev);
	int err;

	ax->bb_ctrl.ops = &bb_ops;
	ax->addr_memr = ei_local->mem + AX_MEMR;
	ax->mii_bus = alloc_mdio_bitbang(&ax->bb_ctrl);
	if (!ax->mii_bus) {
		err = -ENOMEM;
		goto out;
	}

	ax->mii_bus->name = "ax88796_mii_bus";
	ax->mii_bus->parent = dev->dev.parent;
	snprintf(ax->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 pdev->name, pdev->id);

	err = mdiobus_register(ax->mii_bus);
	if (err)
		goto out_free_mdio_bitbang;

	return 0;

out_free_mdio_bitbang:
	free_mdio_bitbang(ax->mii_bus);
out:
	return err;
}

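/*
 * ndo_open: bring up the MDIO bus, claim the interrupt (optionally through
 * the platform's check_irq() filter wrapper), power up the PHY, attach to
 * it and finally let lib8390 start the chip.
 */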
static int ax_open(struct net_device *dev)
{
	struct ax_device *ax = to_ax_dev(dev);
	int ret;

	netdev_dbg(dev, "open\n");

	ret = ax_mii_init(dev);
	if (ret)
		goto failed_mii;

	if (ax->plat->check_irq)
		ret = request_irq(dev->irq, ax_ei_interrupt_filtered,
				  ax->irqflags, dev->name, dev);
	else
		ret = request_irq(dev->irq, ax_ei_interrupt, ax->irqflags,
				  dev->name, dev);
	if (ret)
		goto failed_request_irq;

	/* turn the phy on (if turned off) */
	ax_phy_switch(dev, 1);

	ret = ax_mii_probe(dev);
	if (ret)
		goto failed_mii_probe;
	phy_start(dev->phydev);

	ret = ax_ei_open(dev);
	if (ret)
		goto failed_ax_ei_open;

	ax->running = 1;

	return 0;

failed_ax_ei_open:
	phy_disconnect(dev->phydev);
failed_mii_probe:
	ax_phy_switch(dev, 0);
	free_irq(dev->irq, dev);
failed_request_irq:
	/* unregister mdiobus */
	mdiobus_unregister(ax->mii_bus);
	free_mdio_bitbang(ax->mii_bus);
failed_mii:
	return ret;
}

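/*
 * ndo_stop: the reverse of ax_open() - stop the 8390, power down and
 * disconnect the PHY, release the interrupt and tear down the MDIO bus.
 */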
static int ax_close(struct net_device *dev)
{
	struct ax_device *ax = to_ax_dev(dev);

	netdev_dbg(dev, "close\n");

	ax->running = 0;
	wmb();

	ax_ei_close(dev);

	/* turn the phy off */
	ax_phy_switch(dev, 0);
	phy_disconnect(dev->phydev);

	free_irq(dev->irq, dev);

	mdiobus_unregister(ax->mii_bus);
	free_mdio_bitbang(ax->mii_bus);
	return 0;
}

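/*
 * ndo_do_ioctl: pass MII ioctls through to the attached PHY while the
 * interface is running.
 */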
static int ax_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct phy_device *phy_dev = dev->phydev;

	if (!netif_running(dev))
		return -EINVAL;

	if (!phy_dev)
		return -ENODEV;

	return phy_mii_ioctl(phy_dev, req, cmd);
}

/* ethtool ops */

static void ax_get_drvinfo(struct net_device *dev,
			   struct ethtool_drvinfo *info)
{
	struct platform_device *pdev = to_platform_device(dev->dev.parent);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
}

static u32 ax_get_msglevel(struct net_device *dev)
{
	struct ei_device *ei_local = netdev_priv(dev);

	return ei_local->msg_enable;
}

static void ax_set_msglevel(struct net_device *dev, u32 v)
{
	struct ei_device *ei_local = netdev_priv(dev);

	ei_local->msg_enable = v;
}

static const struct ethtool_ops ax_ethtool_ops = {
	.get_drvinfo		= ax_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_ts_info		= ethtool_op_get_ts_info,
	.get_msglevel		= ax_get_msglevel,
	.set_msglevel		= ax_set_msglevel,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};

#ifdef CONFIG_AX88796_93CX6
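/*
 * Glue for the eeprom_93cx6 helper library: the EEPROM lines of the
 * optional 93CX6 are wired to bits of the MEMR register, so reading and
 * writing the "register" simply mirrors those bits.
 */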
static void ax_eeprom_register_read(struct eeprom_93cx6 *eeprom)
{
	struct ei_device *ei_local = eeprom->data;
	u8 reg = ei_inb(ei_local->mem + AX_MEMR);

	eeprom->reg_data_in = reg & AX_MEMR_EEI;
	eeprom->reg_data_out = reg & AX_MEMR_EEO; /* Input pin */
	eeprom->reg_data_clock = reg & AX_MEMR_EECLK;
	eeprom->reg_chip_select = reg & AX_MEMR_EECS;
}

static void ax_eeprom_register_write(struct eeprom_93cx6 *eeprom)
{
	struct ei_device *ei_local = eeprom->data;
	u8 reg = ei_inb(ei_local->mem + AX_MEMR);

	reg &= ~(AX_MEMR_EEI | AX_MEMR_EECLK | AX_MEMR_EECS);

	if (eeprom->reg_data_in)
		reg |= AX_MEMR_EEI;
	if (eeprom->reg_data_clock)
		reg |= AX_MEMR_EECLK;
	if (eeprom->reg_chip_select)
		reg |= AX_MEMR_EECS;

	ei_outb(reg, ei_local->mem + AX_MEMR);
	udelay(10);
}
#endif

static const struct net_device_ops ax_netdev_ops = {
	.ndo_open		= ax_open,
	.ndo_stop		= ax_close,
	.ndo_do_ioctl		= ax_ioctl,

	.ndo_start_xmit		= ax_ei_start_xmit,
	.ndo_tx_timeout		= ax_ei_tx_timeout,
	.ndo_get_stats		= ax_ei_get_stats,
	.ndo_set_rx_mode	= ax_ei_set_multicast_list,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ax_ei_poll,
#endif
};

/* setup code */

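/*
 * Put the chip into a known state before probing: select page 0 with the
 * NIC stopped, force byte-wide DMA access and program the platform's
 * GPOC value.
 */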
static void ax_initial_setup(struct net_device *dev, struct ei_device *ei_local)
{
	void __iomem *ioaddr = ei_local->mem;
	struct ax_device *ax = to_ax_dev(dev);

	/* Select page 0 */
	ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_STOP, ioaddr + E8390_CMD);

	/* set to byte access */
	ei_outb(ax->plat->dcr_val & ~1, ioaddr + EN0_DCFG);
	ei_outb(ax->plat->gpoc_val, ioaddr + EI_SHIFT(0x17));
}

/*
 * ax_init_dev
 *
 * initialise the specified device, taking care to note the MAC
 * address it may already have (if configured), and ensure that
 * the device is ready to be used by lib8390.c and registered with
 * the network layer.
 */
static int ax_init_dev(struct net_device *dev)
{
	struct ei_device *ei_local = netdev_priv(dev);
	struct ax_device *ax = to_ax_dev(dev);
	void __iomem *ioaddr = ei_local->mem;
	unsigned int start_page;
	unsigned int stop_page;
	int ret;
	int i;

	ret = ax_initial_check(dev);
	if (ret)
		goto err_out;

	/* setup goes here */

	ax_initial_setup(dev, ei_local);

	/* read the mac from the card prom if we need it */

	if (ax->plat->flags & AXFLG_HAS_EEPROM) {
		unsigned char SA_prom[32];

		ei_outb(6, ioaddr + EN0_RCNTLO);
		ei_outb(0, ioaddr + EN0_RCNTHI);
		ei_outb(0, ioaddr + EN0_RSARLO);
		ei_outb(0, ioaddr + EN0_RSARHI);
		ei_outb(E8390_RREAD + E8390_START, ioaddr + NE_CMD);
		for (i = 0; i < sizeof(SA_prom); i += 2) {
			SA_prom[i] = ei_inb(ioaddr + NE_DATAPORT);
			SA_prom[i + 1] = ei_inb(ioaddr + NE_DATAPORT);
		}
		ei_outb(ENISR_RDC, ioaddr + EN0_ISR);	/* Ack intr. */

		if (ax->plat->wordlength == 2)
			for (i = 0; i < 16; i++)
				SA_prom[i] = SA_prom[i+i];

		memcpy(dev->dev_addr, SA_prom, ETH_ALEN);
	}

#ifdef CONFIG_AX88796_93CX6
	if (ax->plat->flags & AXFLG_HAS_93CX6) {
		unsigned char mac_addr[ETH_ALEN];
		struct eeprom_93cx6 eeprom;

		eeprom.data = ei_local;
		eeprom.register_read = ax_eeprom_register_read;
		eeprom.register_write = ax_eeprom_register_write;
		eeprom.width = PCI_EEPROM_WIDTH_93C56;

		eeprom_93cx6_multiread(&eeprom, 0,
				       (__le16 __force *)mac_addr,
				       sizeof(mac_addr) >> 1);

		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
	}
#endif
	if (ax->plat->wordlength == 2) {
		/* We must set the 8390 for word mode. */
		ei_outb(ax->plat->dcr_val, ei_local->mem + EN0_DCFG);
		start_page = NESM_START_PG;
		stop_page = NESM_STOP_PG;
	} else {
		start_page = NE1SM_START_PG;
		stop_page = NE1SM_STOP_PG;
	}

	/* load the mac-address from the device */
	if (ax->plat->flags & AXFLG_MAC_FROMDEV) {
		ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP,
			ei_local->mem + E8390_CMD); /* 0x61 */
		for (i = 0; i < ETH_ALEN; i++)
			dev->dev_addr[i] =
				ei_inb(ioaddr + EN1_PHYS_SHIFT(i));
	}

	if ((ax->plat->flags & AXFLG_MAC_FROMPLATFORM) &&
	    ax->plat->mac_addr)
		memcpy(dev->dev_addr, ax->plat->mac_addr, ETH_ALEN);

	if (!is_valid_ether_addr(dev->dev_addr)) {
		eth_hw_addr_random(dev);
		dev_info(&dev->dev, "Using random MAC address: %pM\n",
			 dev->dev_addr);
	}

	ax_reset_8390(dev);

	ei_local->name = "AX88796";
	ei_local->tx_start_page = start_page;
	ei_local->stop_page = stop_page;
	ei_local->word16 = (ax->plat->wordlength == 2);
	ei_local->rx_start_page = start_page + TX_PAGES;

#ifdef PACKETBUF_MEMSIZE
	/* Allow the packet buffer size to be overridden by know-it-alls. */
	ei_local->stop_page = ei_local->tx_start_page + PACKETBUF_MEMSIZE;
#endif

	ei_local->reset_8390 = &ax_reset_8390;
	if (ax->plat->block_input)
		ei_local->block_input = ax->plat->block_input;
	else
		ei_local->block_input = &ax_block_input;
	if (ax->plat->block_output)
		ei_local->block_output = ax->plat->block_output;
	else
		ei_local->block_output = &ax_block_output;
	ei_local->get_8390_hdr = &ax_get_8390_hdr;
	ei_local->priv = 0;

	dev->netdev_ops = &ax_netdev_ops;
	dev->ethtool_ops = &ax_ethtool_ops;

	ax_NS8390_init(dev, 0);

	ret = register_netdev(dev);
	if (ret)
		goto err_out;

	netdev_info(dev, "%dbit, irq %d, %lx, MAC: %pM\n",
		    ei_local->word16 ? 16 : 8, dev->irq, dev->base_addr,
		    dev->dev_addr);

	return 0;

err_out:
	return ret;
}

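/*
 * Undo everything ax_probe() set up: unregister the netdev, unmap and
 * release the register window(s) and free the net_device itself.
 */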
static int ax_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct ei_device *ei_local = netdev_priv(dev);
	struct ax_device *ax = to_ax_dev(dev);
	struct resource *mem;

	unregister_netdev(dev);

	iounmap(ei_local->mem);
	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(mem->start, resource_size(mem));

	if (ax->map2) {
		iounmap(ax->map2);
		mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		release_mem_region(mem->start, resource_size(mem));
	}

	platform_set_drvdata(pdev, NULL);
	free_netdev(dev);

	return 0;
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) * ax_probe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) *
 * This is the entry point that the platform device system uses to
 * notify us of a new device to attach to. Allocate memory, find the
 * resources and information passed, and map the necessary registers.
 * An illustrative sketch of the matching board-side registration
 * follows this function.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) static int ax_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) struct ei_device *ei_local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) struct ax_device *ax;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) struct resource *irq, *mem, *mem2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) unsigned long mem_size, mem2_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) dev = ax__alloc_ei_netdev(sizeof(struct ax_device));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) if (dev == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
	/* ok, let's set up our device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) SET_NETDEV_DEV(dev, &pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) ei_local = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) ax = to_ax_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) ax->plat = dev_get_platdata(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) platform_set_drvdata(pdev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) ei_local->rxcr_base = ax->plat->rcr_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) /* find the platform resources */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) if (!irq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) dev_err(&pdev->dev, "no IRQ specified\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) goto exit_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) dev->irq = irq->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) ax->irqflags = irq->flags & IRQF_TRIGGER_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) if (irq->flags & IORESOURCE_IRQ_SHAREABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) ax->irqflags |= IRQF_SHARED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) if (!mem) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) dev_err(&pdev->dev, "no MEM specified\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) goto exit_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) mem_size = resource_size(mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
	/*
	 * Set up the register offsets either from the platform data or,
	 * failing that, by deriving them from the size of the resource
	 * provided (see the worked example below).
	 */
	if (ax->plat->reg_offsets) {
		ei_local->reg_offset = ax->plat->reg_offsets;
	} else {
		ei_local->reg_offset = ax->reg_offsets;
		for (ret = 0; ret < 0x18; ret++)
			ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
	}
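	/*
	 * Worked example (illustrative numbers only): a 0x30-byte register
	 * window gives a stride of 0x30 / 0x18 = 2, i.e. offsets
	 * 0x00, 0x02, 0x04, ...  If no separate reset resource is found
	 * below, the offsets are re-derived with a divisor of 0x20,
	 * presumably so that the reset register (index 0x1f) still falls
	 * inside the same window.
	 */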
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) if (!request_mem_region(mem->start, mem_size, pdev->name)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) dev_err(&pdev->dev, "cannot reserve registers\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) goto exit_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) ei_local->mem = ioremap(mem->start, mem_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) dev->base_addr = (unsigned long)ei_local->mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) if (ei_local->mem == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) dev_err(&pdev->dev, "Cannot ioremap area %pR\n", mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) goto exit_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) /* look for reset area */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) mem2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) if (!mem2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) if (!ax->plat->reg_offsets) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) for (ret = 0; ret < 0x20; ret++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) ax->reg_offsets[ret] = (mem_size / 0x20) * ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) mem2_size = resource_size(mem2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) if (!request_mem_region(mem2->start, mem2_size, pdev->name)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) dev_err(&pdev->dev, "cannot reserve registers\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) goto exit_mem1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) ax->map2 = ioremap(mem2->start, mem2_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) if (!ax->map2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) dev_err(&pdev->dev, "cannot map reset register\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) goto exit_mem2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) ei_local->reg_offset[0x1f] = ax->map2 - ei_local->mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) /* got resources, now initialise and register device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) ret = ax_init_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) if (!ax->map2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) goto exit_mem1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) iounmap(ax->map2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) exit_mem2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) if (mem2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) release_mem_region(mem2->start, mem2_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) exit_mem1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) iounmap(ei_local->mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) exit_req:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) release_mem_region(mem->start, mem_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) exit_mem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) platform_set_drvdata(pdev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) free_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
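/*
 * Illustrative sketch only (not compiled): roughly what the board support
 * code that hands this device to ax_probe() might look like.  The device
 * name must match the platform driver name ("ax88796") registered below;
 * the platform-data type is assumed to be struct ax_plat_data from
 * <net/ax88796.h>.  The base address, window size, interrupt number and
 * the "example_" identifiers are placeholders, not taken from any real
 * board.
 */
#if 0
static struct ax_plat_data example_ax_pdata = {
	.wordlength	= 2,	/* 2 => 16-bit interface (ei_local->word16) */
	.rcr_val	= 0,	/* base value for the receive configuration
				 * register (copied to ei_local->rxcr_base) */
	/* .reg_offsets left NULL: ax_probe() derives the register stride
	 * from the size of the first memory resource instead. */
};

static struct resource example_ax_resources[] = {
	[0] = {				/* register window */
		.start	= 0x20000000,
		.end	= 0x20000000 + 0x20 - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {				/* interrupt line */
		.start	= 42,
		.end	= 42,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device example_ax_device = {
	.name		= "ax88796",
	.id		= -1,
	.resource	= example_ax_resources,
	.num_resources	= ARRAY_SIZE(example_ax_resources),
	.dev		= {
		.platform_data	= &example_ax_pdata,
	},
};

/*
 * Board initialisation code would then call
 * platform_device_register(&example_ax_device).
 */
#endif
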
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) /* suspend and resume */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) #ifdef CONFIG_PM
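/*
 * Note whether the interface was up at suspend time so that ax_resume()
 * can bring it back up again after the chip has been re-initialised.
 */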
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) static int ax_suspend(struct platform_device *dev, pm_message_t state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) struct net_device *ndev = platform_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) struct ax_device *ax = to_ax_dev(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) ax->resume_open = ax->running;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) netif_device_detach(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) ax_close(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) static int ax_resume(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) struct net_device *ndev = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) struct ax_device *ax = to_ax_dev(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) ax_initial_setup(ndev, netdev_priv(ndev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) ax_NS8390_init(ndev, ax->resume_open);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) netif_device_attach(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) if (ax->resume_open)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) ax_open(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) #define ax_suspend NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) #define ax_resume NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) static struct platform_driver axdrv = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) .name = "ax88796",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) .probe = ax_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) .remove = ax_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) .suspend = ax_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) .resume = ax_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) module_platform_driver(axdrv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) MODULE_DESCRIPTION("AX88796 10/100 Ethernet platform driver");
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) MODULE_LICENSE("GPL v2");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) MODULE_ALIAS("platform:ax88796");