// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Davicom DM9000 Fast Ethernet driver for Linux.
 * Copyright (C) 1997  Sten Wang
 *
 * (C) Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.
 *
 * Additional updates, Copyright:
 *	Ben Dooks <ben@simtec.co.uk>
 *	Sascha Hauer <s.hauer@pengutronix.de>
 */

#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/ethtool.h>
#include <linux/dm9000.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>

#include <asm/delay.h>
#include <asm/irq.h>
#include <asm/io.h>

#include "dm9000.h"

/* Board/System/Debug information/definition ---------------- */

#define DM9000_PHY		0x40	/* PHY address 0x01 */

#define CARDNAME	"dm9000"

/*
 * Transmit timeout, default 5 seconds.
 */
static int watchdog = 5000;
module_param(watchdog, int, 0400);
MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");

/*
 * Debug messages level
 */
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "dm9000 debug level (0-6)");

/* DM9000 register address locking.
 *
 * The DM9000 uses an address register to control where data written
 * to the data register goes. This means that the address register
 * must be preserved over interrupts or similar calls.
 *
 * During interrupt and other critical calls, a spinlock is used to
 * protect the system, but the calls themselves save the address
 * in the address register in case they are interrupting another
 * access to the device.
 *
 * For general accesses a lock is provided so that calls which are
 * allowed to sleep are serialised so that the address register does
 * not need to be saved. This lock also serves to serialise access
 * to the EEPROM and PHY access registers which are shared between
 * these two devices.
 */
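
/* A sketch of the resulting access pattern (illustrative only; the
 * real sequences live in the PHY/EEPROM helpers below, and SOME_REG
 * and value are placeholders):
 *
 *	spin_lock_irqsave(&db->lock, flags);
 *	reg_save = readb(db->io_addr);		-- save address register
 *	iow(db, SOME_REG, value);		-- do the locked access
 *	writeb(reg_save, db->io_addr);		-- restore it
 *	spin_unlock_irqrestore(&db->lock, flags);
 *
 * Sleeping callers additionally take db->addr_lock around the whole
 * sequence.
 */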

/* The driver supports the original DM9000E as well as the newer
 * DM9000A and DM9000B devices.
 */

enum dm9000_type {
	TYPE_DM9000E,	/* original DM9000 */
	TYPE_DM9000A,
	TYPE_DM9000B
};

/* Structure/enum declaration ------------------------------- */
struct board_info {

	void __iomem	*io_addr;	/* Register I/O base address */
	void __iomem	*io_data;	/* Data I/O address */
	u16		 irq;		/* IRQ */

	u16		tx_pkt_cnt;
	u16		queue_pkt_len;
	u16		queue_start_addr;
	u16		queue_ip_summed;
	u16		dbug_cnt;
	u8		io_mode;	/* 0:word, 2:byte */
	u8		phy_addr;
	u8		imr_all;

	unsigned int	flags;
	unsigned int	in_timeout:1;
	unsigned int	in_suspend:1;
	unsigned int	wake_supported:1;

	enum dm9000_type type;

	void (*inblk)(void __iomem *port, void *data, int length);
	void (*outblk)(void __iomem *port, void *data, int length);
	void (*dumpblk)(void __iomem *port, int length);

	struct device	*dev;		/* parent device */

	struct resource	*addr_res;	/* resources found */
	struct resource	*data_res;
	struct resource	*addr_req;	/* resources requested */
	struct resource	*data_req;

	int		 irq_wake;

	struct mutex	 addr_lock;	/* phy and eeprom access lock */

	struct delayed_work phy_poll;
	struct net_device  *ndev;

	spinlock_t	lock;

	struct mii_if_info mii;
	u32		msg_enable;
	u32		wake_state;

	int		ip_summed;

	struct regulator *power_supply;
};

/* debug code */

#define dm9000_dbg(db, lev, msg...) do {		\
	if ((lev) < debug) {				\
		dev_dbg(db->dev, msg);			\
	}						\
} while (0)
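
/* dm9000_dbg() only emits anything when the requested level is below
 * the "debug" module parameter, so e.g. a level-5 message needs
 * debug >= 6 to appear (and dev_dbg() itself still needs dynamic
 * debug or DEBUG to be enabled).
 */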

static inline struct board_info *to_dm9000_board(struct net_device *dev)
{
	return netdev_priv(dev);
}

/* DM9000 network board routine ---------------------------- */

/*
 *   Read a byte from I/O port
 */
static u8
ior(struct board_info *db, int reg)
{
	writeb(reg, db->io_addr);
	return readb(db->io_data);
}

/*
 *   Write a byte to I/O port
 */

static void
iow(struct board_info *db, int reg, int value)
{
	writeb(reg, db->io_addr);
	writeb(value, db->io_data);
}

static void
dm9000_reset(struct board_info *db)
{
	dev_dbg(db->dev, "resetting device\n");

	/* Reset DM9000, see DM9000 Application Notes V1.22 Jun 11, 2004 page 29
	 * The essential point is that we have to do a double reset, and the
	 * instruction is to set LBK into MAC internal loopback mode.
	 */
	iow(db, DM9000_NCR, NCR_RST | NCR_MAC_LBK);
	udelay(100); /* Application note says at least 20 us */
	if (ior(db, DM9000_NCR) & 1)
		dev_err(db->dev, "dm9000 did not respond to first reset\n");

	iow(db, DM9000_NCR, 0);
	iow(db, DM9000_NCR, NCR_RST | NCR_MAC_LBK);
	udelay(100);
	if (ior(db, DM9000_NCR) & 1)
		dev_err(db->dev, "dm9000 did not respond to second reset\n");
}

/* routines for sending block to chip */

static void dm9000_outblk_8bit(void __iomem *reg, void *data, int count)
{
	iowrite8_rep(reg, data, count);
}

static void dm9000_outblk_16bit(void __iomem *reg, void *data, int count)
{
	iowrite16_rep(reg, data, (count+1) >> 1);
}

static void dm9000_outblk_32bit(void __iomem *reg, void *data, int count)
{
	iowrite32_rep(reg, data, (count+3) >> 2);
}

/* input block from chip to memory */

static void dm9000_inblk_8bit(void __iomem *reg, void *data, int count)
{
	ioread8_rep(reg, data, count);
}


static void dm9000_inblk_16bit(void __iomem *reg, void *data, int count)
{
	ioread16_rep(reg, data, (count+1) >> 1);
}

static void dm9000_inblk_32bit(void __iomem *reg, void *data, int count)
{
	ioread32_rep(reg, data, (count+3) >> 2);
}
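
/* Note on the 16/32-bit variants above: the byte count is rounded up
 * to whole 16- or 32-bit transfers, so up to one or three bytes beyond
 * "count" may be clocked to or from the FIFO.  The chip works on the
 * separately-programmed packet length, so the extra padding appears to
 * be harmless (a reading of the code, not a datasheet statement).
 */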

/* dump block from chip to null */

static void dm9000_dumpblk_8bit(void __iomem *reg, int count)
{
	int i;
	int tmp;

	for (i = 0; i < count; i++)
		tmp = readb(reg);
}

static void dm9000_dumpblk_16bit(void __iomem *reg, int count)
{
	int i;
	int tmp;

	count = (count + 1) >> 1;

	for (i = 0; i < count; i++)
		tmp = readw(reg);
}

static void dm9000_dumpblk_32bit(void __iomem *reg, int count)
{
	int i;
	int tmp;

	count = (count + 3) >> 2;

	for (i = 0; i < count; i++)
		tmp = readl(reg);
}
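
/* The dumpblk routines read and discard data; they are used to skip
 * over data the driver does not want to keep (e.g. a packet being
 * dropped), so that the chip's internal read pointer still advances.
 * "tmp" exists only to give the MMIO read a destination.
 */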

/*
 * Sleep, either by using msleep() or, if we are suspending or in the
 * transmit-timeout path, by busy-waiting with mdelay().
 */
static void dm9000_msleep(struct board_info *db, unsigned int ms)
{
	if (db->in_suspend || db->in_timeout)
		mdelay(ms);
	else
		msleep(ms);
}
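
/* PHY ("phyxcer") access is performed indirectly through the
 * EEPROM/PHY access registers: EPAR selects the target (DM9000_PHY
 * ORed with the PHY register number addresses the internal PHY),
 * EPCR issues the read/write and EPDRL/EPDRH carry the 16-bit data.
 * This summary is derived from how the two helpers below drive the
 * registers rather than from the datasheet.
 */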

/* Read a word from phyxcer */
static int
dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
{
	struct board_info *db = netdev_priv(dev);
	unsigned long flags;
	unsigned int reg_save;
	int ret;

	mutex_lock(&db->addr_lock);

	spin_lock_irqsave(&db->lock, flags);

	/* Save previous register address */
	reg_save = readb(db->io_addr);

	/* Fill the phyxcer register into REG_0C */
	iow(db, DM9000_EPAR, DM9000_PHY | reg);

	/* Issue phyxcer read command */
	iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS);

	writeb(reg_save, db->io_addr);
	spin_unlock_irqrestore(&db->lock, flags);

	dm9000_msleep(db, 1);		/* Wait read complete */

	spin_lock_irqsave(&db->lock, flags);
	reg_save = readb(db->io_addr);

	iow(db, DM9000_EPCR, 0x0);	/* Clear phyxcer read command */

	/* The read data keeps on REG_0D & REG_0E */
	ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);

	/* restore the previous address */
	writeb(reg_save, db->io_addr);
	spin_unlock_irqrestore(&db->lock, flags);

	mutex_unlock(&db->addr_lock);

	dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
	return ret;
}

/* Write a word to phyxcer */
static void
dm9000_phy_write(struct net_device *dev,
		 int phyaddr_unused, int reg, int value)
{
	struct board_info *db = netdev_priv(dev);
	unsigned long flags;
	unsigned long reg_save;

	dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
	if (!db->in_timeout)
		mutex_lock(&db->addr_lock);

	spin_lock_irqsave(&db->lock, flags);

	/* Save previous register address */
	reg_save = readb(db->io_addr);

	/* Fill the phyxcer register into REG_0C */
	iow(db, DM9000_EPAR, DM9000_PHY | reg);

	/* Fill the written data into REG_0D & REG_0E */
	iow(db, DM9000_EPDRL, value);
	iow(db, DM9000_EPDRH, value >> 8);

	/* Issue phyxcer write command */
	iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW);

	writeb(reg_save, db->io_addr);
	spin_unlock_irqrestore(&db->lock, flags);

	dm9000_msleep(db, 1);		/* Wait write complete */

	spin_lock_irqsave(&db->lock, flags);
	reg_save = readb(db->io_addr);

	iow(db, DM9000_EPCR, 0x0);	/* Clear phyxcer write command */

	/* restore the previous address */
	writeb(reg_save, db->io_addr);

	spin_unlock_irqrestore(&db->lock, flags);
	if (!db->in_timeout)
		mutex_unlock(&db->addr_lock);
}

/* dm9000_set_io
 *
 * select the specified set of io routines to use with the
 * device
 */

static void dm9000_set_io(struct board_info *db, int byte_width)
{
	/* use the size of the data resource to work out what IO
	 * routines we want to use
	 */

	switch (byte_width) {
	case 1:
		db->dumpblk = dm9000_dumpblk_8bit;
		db->outblk  = dm9000_outblk_8bit;
		db->inblk   = dm9000_inblk_8bit;
		break;


	case 3:
		dev_dbg(db->dev, ": 3 byte IO, falling back to 16bit\n");
		fallthrough;
	case 2:
		db->dumpblk = dm9000_dumpblk_16bit;
		db->outblk  = dm9000_outblk_16bit;
		db->inblk   = dm9000_inblk_16bit;
		break;

	case 4:
	default:
		db->dumpblk = dm9000_dumpblk_32bit;
		db->outblk  = dm9000_outblk_32bit;
		db->inblk   = dm9000_inblk_32bit;
		break;
	}
}

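/* Only the original DM9000E gets a periodic PHY/link poll; the later
 * A/B parts are presumably able to signal link changes themselves, so
 * the delayed work is simply not scheduled for them.
 */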
static void dm9000_schedule_poll(struct board_info *db)
{
	if (db->type == TYPE_DM9000E)
		schedule_delayed_work(&db->phy_poll, HZ * 2);
}

static int dm9000_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct board_info *dm = to_dm9000_board(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return generic_mii_ioctl(&dm->mii, if_mii(req), cmd, NULL);
}

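/* Read a single register with the address-register spinlock held, for
 * callers that need one value and do not already hold db->lock.
 */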
static unsigned int
dm9000_read_locked(struct board_info *db, int reg)
{
	unsigned long flags;
	unsigned int ret;

	spin_lock_irqsave(&db->lock, flags);
	ret = ior(db, reg);
	spin_unlock_irqrestore(&db->lock, flags);

	return ret;
}

static int dm9000_wait_eeprom(struct board_info *db)
{
	unsigned int status;
	int timeout = 8;	/* wait max 8msec */

	/* The DM9000 data sheets say we should be able to
	 * poll the ERRE bit in EPCR to wait for the EEPROM
	 * operation to complete. From testing several chips,
	 * this bit does not seem to work.
	 *
	 * We attempt to use the bit, but fall back to the
	 * timeout (which is why we do not return an error
	 * on expiry) to say that the EEPROM operation has
	 * completed.
	 */

	while (1) {
		status = dm9000_read_locked(db, DM9000_EPCR);

		if ((status & EPCR_ERRE) == 0)
			break;

		msleep(1);

		if (timeout-- < 0) {
			dev_dbg(db->dev, "timeout waiting EEPROM\n");
			break;
		}
	}

	return 0;
}

/*
 *  Read one word of data from the EEPROM.
 */
static void
dm9000_read_eeprom(struct board_info *db, int offset, u8 *to)
{
	unsigned long flags;

	if (db->flags & DM9000_PLATF_NO_EEPROM) {
		to[0] = 0xff;
		to[1] = 0xff;
		return;
	}

	mutex_lock(&db->addr_lock);

	spin_lock_irqsave(&db->lock, flags);

	iow(db, DM9000_EPAR, offset);
	iow(db, DM9000_EPCR, EPCR_ERPRR);

	spin_unlock_irqrestore(&db->lock, flags);

	dm9000_wait_eeprom(db);

	/* delay for at least 150 us */
	msleep(1);

	spin_lock_irqsave(&db->lock, flags);

	iow(db, DM9000_EPCR, 0x0);

	to[0] = ior(db, DM9000_EPDRL);
	to[1] = ior(db, DM9000_EPDRH);

	spin_unlock_irqrestore(&db->lock, flags);

	mutex_unlock(&db->addr_lock);
}
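
/* Note: the "offset" taken by dm9000_read_eeprom() and
 * dm9000_write_eeprom() is a word (16-bit) offset; the ethtool
 * callers below convert byte offsets with offset / 2.
 */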

/*
 * Write one word of data to the EEPROM (SROM).
 */
static void
dm9000_write_eeprom(struct board_info *db, int offset, u8 *data)
{
	unsigned long flags;

	if (db->flags & DM9000_PLATF_NO_EEPROM)
		return;

	mutex_lock(&db->addr_lock);

	spin_lock_irqsave(&db->lock, flags);
	iow(db, DM9000_EPAR, offset);
	iow(db, DM9000_EPDRH, data[1]);
	iow(db, DM9000_EPDRL, data[0]);
	iow(db, DM9000_EPCR, EPCR_WEP | EPCR_ERPRW);
	spin_unlock_irqrestore(&db->lock, flags);

	dm9000_wait_eeprom(db);

	mdelay(1);	/* wait at least 150 us to clear */

	spin_lock_irqsave(&db->lock, flags);
	iow(db, DM9000_EPCR, 0);
	spin_unlock_irqrestore(&db->lock, flags);

	mutex_unlock(&db->addr_lock);
}

/* ethtool ops */

static void dm9000_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	struct board_info *dm = to_dm9000_board(dev);

	strlcpy(info->driver, CARDNAME, sizeof(info->driver));
	strlcpy(info->bus_info, to_platform_device(dm->dev)->name,
		sizeof(info->bus_info));
}

static u32 dm9000_get_msglevel(struct net_device *dev)
{
	struct board_info *dm = to_dm9000_board(dev);

	return dm->msg_enable;
}

static void dm9000_set_msglevel(struct net_device *dev, u32 value)
{
	struct board_info *dm = to_dm9000_board(dev);

	dm->msg_enable = value;
}

static int dm9000_get_link_ksettings(struct net_device *dev,
				     struct ethtool_link_ksettings *cmd)
{
	struct board_info *dm = to_dm9000_board(dev);

	mii_ethtool_get_link_ksettings(&dm->mii, cmd);
	return 0;
}

static int dm9000_set_link_ksettings(struct net_device *dev,
				     const struct ethtool_link_ksettings *cmd)
{
	struct board_info *dm = to_dm9000_board(dev);

	return mii_ethtool_set_link_ksettings(&dm->mii, cmd);
}

static int dm9000_nway_reset(struct net_device *dev)
{
	struct board_info *dm = to_dm9000_board(dev);
	return mii_nway_restart(&dm->mii);
}

static int dm9000_set_features(struct net_device *dev,
	netdev_features_t features)
{
	struct board_info *dm = to_dm9000_board(dev);
	netdev_features_t changed = dev->features ^ features;
	unsigned long flags;

	if (!(changed & NETIF_F_RXCSUM))
		return 0;

	spin_lock_irqsave(&dm->lock, flags);
	iow(dm, DM9000_RCSR, (features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);
	spin_unlock_irqrestore(&dm->lock, flags);

	return 0;
}

static u32 dm9000_get_link(struct net_device *dev)
{
	struct board_info *dm = to_dm9000_board(dev);
	u32 ret;

	if (dm->flags & DM9000_PLATF_EXT_PHY)
		ret = mii_link_ok(&dm->mii);
	else
		ret = dm9000_read_locked(dm, DM9000_NSR) & NSR_LINKST ? 1 : 0;

	return ret;
}

#define DM_EEPROM_MAGIC		(0x444D394B)

static int dm9000_get_eeprom_len(struct net_device *dev)
{
	return 128;
}

static int dm9000_get_eeprom(struct net_device *dev,
			     struct ethtool_eeprom *ee, u8 *data)
{
	struct board_info *dm = to_dm9000_board(dev);
	int offset = ee->offset;
	int len = ee->len;
	int i;

	/* EEPROM access is aligned to two bytes */

	if ((len & 1) != 0 || (offset & 1) != 0)
		return -EINVAL;

	if (dm->flags & DM9000_PLATF_NO_EEPROM)
		return -ENOENT;

	ee->magic = DM_EEPROM_MAGIC;

	for (i = 0; i < len; i += 2)
		dm9000_read_eeprom(dm, (offset + i) / 2, data + i);

	return 0;
}

static int dm9000_set_eeprom(struct net_device *dev,
			     struct ethtool_eeprom *ee, u8 *data)
{
	struct board_info *dm = to_dm9000_board(dev);
	int offset = ee->offset;
	int len = ee->len;
	int done;

	/* EEPROM access is aligned to two bytes */

	if (dm->flags & DM9000_PLATF_NO_EEPROM)
		return -ENOENT;

	if (ee->magic != DM_EEPROM_MAGIC)
		return -EINVAL;

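	/* An odd offset or length is handled by read-modify-writing the
	 * 16-bit word that contains the byte; aligned pairs are written
	 * directly, two bytes at a time.
	 */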
	while (len > 0) {
		if (len & 1 || offset & 1) {
			int which = offset & 1;
			u8 tmp[2];

			dm9000_read_eeprom(dm, offset / 2, tmp);
			tmp[which] = *data;
			dm9000_write_eeprom(dm, offset / 2, tmp);

			done = 1;
		} else {
			dm9000_write_eeprom(dm, offset / 2, data);
			done = 2;
		}

		data += done;
		offset += done;
		len -= done;
	}

	return 0;
}

static void dm9000_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct board_info *dm = to_dm9000_board(dev);

	memset(w, 0, sizeof(struct ethtool_wolinfo));

	/* note, we could probably support wake-phy too */
	w->supported = dm->wake_supported ? WAKE_MAGIC : 0;
	w->wolopts = dm->wake_state;
}

static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct board_info *dm = to_dm9000_board(dev);
	unsigned long flags;
	u32 opts = w->wolopts;
	u32 wcr = 0;

	if (!dm->wake_supported)
		return -EOPNOTSUPP;

	if (opts & ~WAKE_MAGIC)
		return -EINVAL;

	if (opts & WAKE_MAGIC)
		wcr |= WCR_MAGICEN;

	mutex_lock(&dm->addr_lock);

	spin_lock_irqsave(&dm->lock, flags);
	iow(dm, DM9000_WCR, wcr);
	spin_unlock_irqrestore(&dm->lock, flags);

	mutex_unlock(&dm->addr_lock);

	if (dm->wake_state != opts) {
		/* change in wol state, update IRQ state */

		if (!dm->wake_state)
			irq_set_irq_wake(dm->irq_wake, 1);
		else if (dm->wake_state && !opts)
			irq_set_irq_wake(dm->irq_wake, 0);
	}

	dm->wake_state = opts;
	return 0;
}

static const struct ethtool_ops dm9000_ethtool_ops = {
	.get_drvinfo		= dm9000_get_drvinfo,
	.get_msglevel		= dm9000_get_msglevel,
	.set_msglevel		= dm9000_set_msglevel,
	.nway_reset		= dm9000_nway_reset,
	.get_link		= dm9000_get_link,
	.get_wol		= dm9000_get_wol,
	.set_wol		= dm9000_set_wol,
	.get_eeprom_len		= dm9000_get_eeprom_len,
	.get_eeprom		= dm9000_get_eeprom,
	.set_eeprom		= dm9000_set_eeprom,
	.get_link_ksettings	= dm9000_get_link_ksettings,
	.set_link_ksettings	= dm9000_set_link_ksettings,
};

static void dm9000_show_carrier(struct board_info *db,
				unsigned carrier, unsigned nsr)
{
	int lpa;
	struct net_device *ndev = db->ndev;
	struct mii_if_info *mii = &db->mii;
	unsigned ncr = dm9000_read_locked(db, DM9000_NCR);

	if (carrier) {
		lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA);
		dev_info(db->dev,
			 "%s: link up, %dMbps, %s-duplex, lpa 0x%04X\n",
			 ndev->name, (nsr & NSR_SPEED) ? 10 : 100,
			 (ncr & NCR_FDX) ? "full" : "half", lpa);
	} else {
		dev_info(db->dev, "%s: link down\n", ndev->name);
	}
}

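/* Periodic link poll.  Boards that use the internal PHY in "simple"
 * mode take the link state straight from the NSR register; everything
 * else goes through the generic MII helpers.
 */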
static void
dm9000_poll_work(struct work_struct *w)
{
	struct delayed_work *dw = to_delayed_work(w);
	struct board_info *db = container_of(dw, struct board_info, phy_poll);
	struct net_device *ndev = db->ndev;

	if (db->flags & DM9000_PLATF_SIMPLE_PHY &&
	    !(db->flags & DM9000_PLATF_EXT_PHY)) {
		unsigned nsr = dm9000_read_locked(db, DM9000_NSR);
		unsigned old_carrier = netif_carrier_ok(ndev) ? 1 : 0;
		unsigned new_carrier;

		new_carrier = (nsr & NSR_LINKST) ? 1 : 0;

		if (old_carrier != new_carrier) {
			if (netif_msg_link(db))
				dm9000_show_carrier(db, new_carrier, nsr);

			if (!new_carrier)
				netif_carrier_off(ndev);
			else
				netif_carrier_on(ndev);
		}
	} else
		mii_check_media(&db->mii, netif_msg_link(db), 0);

	if (netif_running(ndev))
		dm9000_schedule_poll(db);
}

/* dm9000_release_board
 *
 * release a board, and any mapped resources
 */

static void
dm9000_release_board(struct platform_device *pdev, struct board_info *db)
{
	/* unmap our resources */

	iounmap(db->io_addr);
	iounmap(db->io_data);

	/* release the resources */

	if (db->data_req)
		release_resource(db->data_req);
	kfree(db->data_req);

	if (db->addr_req)
		release_resource(db->addr_req);
	kfree(db->addr_req);
}

static unsigned char dm9000_type_to_char(enum dm9000_type type)
{
	switch (type) {
	case TYPE_DM9000E: return 'e';
	case TYPE_DM9000A: return 'a';
	case TYPE_DM9000B: return 'b';
	}

	return '?';
}

/*
 *  Set DM9000 multicast address
 */
static void
dm9000_hash_table_unlocked(struct net_device *dev)
{
	struct board_info *db = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int i, oft;
	u32 hash_val;
	u16 hash_table[4] = { 0, 0, 0, 0x8000 }; /* broadcast address */
	u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;

	dm9000_dbg(db, 1, "entering %s\n", __func__);

	for (i = 0, oft = DM9000_PAR; i < 6; i++, oft++)
		iow(db, oft, dev->dev_addr[i]);

	if (dev->flags & IFF_PROMISC)
		rcr |= RCR_PRMSC;

	if (dev->flags & IFF_ALLMULTI)
		rcr |= RCR_ALL;

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) /* Fill the 64-bit multicast hash table */
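/*
 * Imperfect hash filter: the low six bits of the little-endian CRC of
 * each multicast address select one of 64 bits; bits [5:4] pick one of
 * the four 16-bit MAR words and bits [3:0] the bit within it. Bit 63
 * is pre-set above so broadcast frames are always accepted.
 */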
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) netdev_for_each_mc_addr(ha, dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) hash_val = ether_crc_le(6, ha->addr) & 0x3f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) /* Write the hash table to MAC MD table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) for (i = 0, oft = DM9000_MAR; i < 4; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) iow(db, oft++, hash_table[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) iow(db, oft++, hash_table[i] >> 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) iow(db, DM9000_RCR, rcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) dm9000_hash_table(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) struct board_info *db = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) spin_lock_irqsave(&db->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) dm9000_hash_table_unlocked(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) spin_unlock_irqrestore(&db->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
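/*
 * Masking writes IMR_PAR rather than 0 so that the SRAM pointer
 * auto-return feature stays enabled while every interrupt source is
 * disabled; unmasking restores the full set cached in db->imr_all by
 * dm9000_init_dm9000().
 */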
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) dm9000_mask_interrupts(struct board_info *db)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) iow(db, DM9000_IMR, IMR_PAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) dm9000_unmask_interrupts(struct board_info *db)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) iow(db, DM9000_IMR, db->imr_all);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) * Initialize dm9000 board
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) dm9000_init_dm9000(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) struct board_info *db = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) unsigned int imr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) unsigned int ncr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) dm9000_dbg(db, 1, "entering %s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) dm9000_reset(db);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) dm9000_mask_interrupts(db);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) /* I/O mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) db->io_mode = ior(db, DM9000_ISR) >> 6; /* ISR bit7:6 keeps I/O mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) /* Checksum mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) if (dev->hw_features & NETIF_F_RXCSUM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) iow(db, DM9000_RCSR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) (dev->features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) iow(db, DM9000_GPR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) /* If we are dealing with DM9000B, some extra steps are required: a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) * manual phy reset, and setting init params.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) if (db->type == TYPE_DM9000B) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) /* If wake-on-LAN is supported, always keep NCR_WAKEEN set; clearing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) * it would throw away wake events. The individual wake sources are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) * masked separately via DM9000_WCR. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) if (db->wake_supported)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) ncr |= NCR_WAKEEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) iow(db, DM9000_NCR, ncr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) /* Program operating register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) iow(db, DM9000_TCR, 0); /* TX Polling clear */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) iow(db, DM9000_BPTR, 0x3f); /* Less 3Kb, 200us */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) iow(db, DM9000_FCR, 0xff); /* Flow Control */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) iow(db, DM9000_SMCR, 0); /* Special Mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) /* clear TX status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) iow(db, DM9000_NSR, NSR_WAKEST | NSR_TX2END | NSR_TX1END);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) iow(db, DM9000_ISR, ISR_CLR_STATUS); /* Clear interrupt status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) /* Set address filter table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) dm9000_hash_table_unlocked(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
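/* Always take TX-complete and RX interrupts; the link-change interrupt
 * is only enabled for non-E parts, matching the type check done in
 * dm9000_interrupt().
 */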
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) imr = IMR_PAR | IMR_PTM | IMR_PRM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) if (db->type != TYPE_DM9000E)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) imr |= IMR_LNKCHNG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) db->imr_all = imr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) /* Init Driver variable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) db->tx_pkt_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) db->queue_pkt_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) netif_trans_update(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) /* Our watchdog timed out. Called by the networking layer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) static void dm9000_timeout(struct net_device *dev, unsigned int txqueue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) struct board_info *db = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) u8 reg_save;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) /* Save previous register address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) spin_lock_irqsave(&db->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) db->in_timeout = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) reg_save = readb(db->io_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) dm9000_init_dm9000(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) dm9000_unmask_interrupts(db);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) /* We can accept TX packets again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) netif_trans_update(dev); /* prevent tx timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) /* Restore previous register address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) writeb(reg_save, db->io_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) db->in_timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) spin_unlock_irqrestore(&db->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) static void dm9000_send_packet(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) int ip_summed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) u16 pkt_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) struct board_info *dm = to_dm9000_board(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) /* The DM9000 is not smart enough to leave fragmented packets alone. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) if (dm->ip_summed != ip_summed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) if (ip_summed == CHECKSUM_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) iow(dm, DM9000_TCCR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) iow(dm, DM9000_TCCR, TCCR_IP | TCCR_UDP | TCCR_TCP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) dm->ip_summed = ip_summed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) /* Set TX length to DM9000 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) iow(dm, DM9000_TXPLL, pkt_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) iow(dm, DM9000_TXPLH, pkt_len >> 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) /* Issue TX polling command */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) iow(dm, DM9000_TCR, TCR_TXREQ); /* Cleared after TX complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) * Hardware start transmission.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) * Send a packet to media from the upper layer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) struct board_info *db = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) dm9000_dbg(db, 3, "%s:\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
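/* The driver keeps at most two packets pending in the chip's TX SRAM
 * (tracked by tx_pkt_cnt), so refuse further frames until
 * dm9000_tx_done() has reclaimed one of them.
 */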
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) if (db->tx_pkt_cnt > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) return NETDEV_TX_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) spin_lock_irqsave(&db->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) /* Move data to DM9000 TX RAM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) writeb(DM9000_MWCMD, db->io_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) (db->outblk)(db->io_data, skb->data, skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) dev->stats.tx_bytes += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) db->tx_pkt_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) /* TX control: send the first packet immediately, queue the second */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) if (db->tx_pkt_cnt == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) dm9000_send_packet(dev, skb->ip_summed, skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) /* Second packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) db->queue_pkt_len = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) db->queue_ip_summed = skb->ip_summed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) spin_unlock_irqrestore(&db->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) /* free this SKB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) dev_consume_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) * DM9000 interrupt handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) * pass received packets up the stack and reclaim completed transmissions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) static void dm9000_tx_done(struct net_device *dev, struct board_info *db)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) int tx_status = ior(db, DM9000_NSR); /* Got TX status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) if (tx_status & (NSR_TX2END | NSR_TX1END)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) /* One packet sent complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) db->tx_pkt_cnt--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) dev->stats.tx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) if (netif_msg_tx_done(db))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) dev_dbg(db->dev, "tx done, NSR %02x\n", tx_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) /* Queue packet check & send */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) if (db->tx_pkt_cnt > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) dm9000_send_packet(dev, db->queue_ip_summed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) db->queue_pkt_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
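/*
 * Four-byte header the chip prepends to every frame in its RX SRAM:
 * RxPktReady mirrors the "packet ready" byte polled in dm9000_rx(),
 * RxStatus mirrors the RSR register and RxLen is the little-endian
 * frame length as stored by the chip (which appears to include the
 * trailing 4-byte CRC, hence the RxLen - 4 passed to skb_put() below).
 */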
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) struct dm9000_rxhdr {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) u8 RxPktReady;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) u8 RxStatus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) __le16 RxLen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) } __packed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) * Received a packet and pass to upper layer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) dm9000_rx(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) struct board_info *db = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) struct dm9000_rxhdr rxhdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) u8 rxbyte, *rdptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) bool GoodPacket;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) int RxLen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) /* Check packet ready or not */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) ior(db, DM9000_MRCMDX); /* Dummy read */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) /* Get most updated data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) rxbyte = readb(db->io_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) /* Status check: this byte must be 0 or 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) if (rxbyte & DM9000_PKT_ERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) dev_warn(db->dev, "status check fail: %d\n", rxbyte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) iow(db, DM9000_RCR, 0x00); /* Stop Device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) if (!(rxbyte & DM9000_PKT_RDY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) /* A packet is ready; read its status and length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) GoodPacket = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) writeb(DM9000_MRCMD, db->io_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) (db->inblk)(db->io_data, &rxhdr, sizeof(rxhdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) RxLen = le16_to_cpu(rxhdr.RxLen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) if (netif_msg_rx_status(db))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) dev_dbg(db->dev, "RX: status %02x, length %04x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) rxhdr.RxStatus, RxLen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) /* Packet Status check */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) if (RxLen < 0x40) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) GoodPacket = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) if (netif_msg_rx_err(db))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) dev_dbg(db->dev, "RX: Bad Packet (runt)\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) if (RxLen > DM9000_PKT_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) dev_dbg(db->dev, "RST: RX Len:%x\n", RxLen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) /* rxhdr.RxStatus is identical to RSR register. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) if (rxhdr.RxStatus & (RSR_FOE | RSR_CE | RSR_AE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) RSR_PLE | RSR_RWTO |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) RSR_LCS | RSR_RF)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) GoodPacket = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) if (rxhdr.RxStatus & RSR_FOE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) if (netif_msg_rx_err(db))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) dev_dbg(db->dev, "fifo error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) dev->stats.rx_fifo_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) if (rxhdr.RxStatus & RSR_CE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) if (netif_msg_rx_err(db))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) dev_dbg(db->dev, "crc error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) dev->stats.rx_crc_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) if (rxhdr.RxStatus & RSR_RF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) if (netif_msg_rx_err(db))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) dev_dbg(db->dev, "length error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) dev->stats.rx_length_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) /* Move data from DM9000 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) if (GoodPacket &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) ((skb = netdev_alloc_skb(dev, RxLen + 4)) != NULL)) {
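/* Allocate with 4 bytes of slack and reserve 2 so the IP header stays
 * word-aligned; only RxLen - 4 bytes are accounted as packet data, but
 * the full RxLen bytes reported by the chip are drained below so its
 * read pointer stays in sync.
 */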
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) skb_reserve(skb, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) rdptr = skb_put(skb, RxLen - 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) /* Read received packet from RX SRAM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) (db->inblk)(db->io_data, rdptr, RxLen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) dev->stats.rx_bytes += RxLen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) /* Pass to upper layer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) skb->protocol = eth_type_trans(skb, dev);
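/* Let the hardware's verdict stand: the status byte read at the top of
 * the loop appears to carry the RX checksum-failure flags on parts
 * that offer offload, so a clean result means the stack need not
 * verify the checksum again.
 */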
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) if (dev->features & NETIF_F_RXCSUM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) if ((((rxbyte & 0x1c) << 3) & rxbyte) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) skb->ip_summed = CHECKSUM_UNNECESSARY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) skb_checksum_none_assert(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) netif_rx(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) dev->stats.rx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) /* need to dump the packet's data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) (db->dumpblk)(db->io_data, RxLen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) } while (rxbyte & DM9000_PKT_RDY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) struct net_device *dev = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) struct board_info *db = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) int int_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) u8 reg_save;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) dm9000_dbg(db, 3, "entering %s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) /* A real interrupt has arrived */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) /* holders of db->lock must always block IRQs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) spin_lock_irqsave(&db->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
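/* All chip accesses go through an index port (io_addr) and a data port
 * (io_data); save whatever register index was selected when the
 * interrupt arrived and restore it on the way out so any access
 * sequence we preempted carries on undisturbed.
 */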
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) /* Save previous register address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) reg_save = readb(db->io_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) dm9000_mask_interrupts(db);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) /* Got DM9000 interrupt status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) int_status = ior(db, DM9000_ISR); /* Got ISR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) iow(db, DM9000_ISR, int_status); /* Clear ISR status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) if (netif_msg_intr(db))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) dev_dbg(db->dev, "interrupt status %02x\n", int_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) /* Received the coming packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) if (int_status & ISR_PRS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) dm9000_rx(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) /* Transmit Interrupt check */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) if (int_status & ISR_PTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) dm9000_tx_done(dev, db);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) if (db->type != TYPE_DM9000E) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) if (int_status & ISR_LNKCHNG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) /* fire a link-change request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) schedule_delayed_work(&db->phy_poll, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) dm9000_unmask_interrupts(db);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) /* Restore previous register address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) writeb(reg_save, db->io_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) spin_unlock_irqrestore(&db->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) static irqreturn_t dm9000_wol_interrupt(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) struct net_device *dev = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) struct board_info *db = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) unsigned nsr, wcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
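/* The wake line may be shared; read NSR/WCR to see whether (and why)
 * this chip asserted it, and only claim the interrupt when NSR_WAKEST
 * is set (see the return value below).
 */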
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) spin_lock_irqsave(&db->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) nsr = ior(db, DM9000_NSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) wcr = ior(db, DM9000_WCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) dev_dbg(db->dev, "%s: NSR=0x%02x, WCR=0x%02x\n", __func__, nsr, wcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) if (nsr & NSR_WAKEST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) /* clear the wake status so the same event is not reported again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) iow(db, DM9000_NSR, NSR_WAKEST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) if (wcr & WCR_LINKST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) dev_info(db->dev, "wake by link status change\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) if (wcr & WCR_SAMPLEST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) dev_info(db->dev, "wake by sample packet\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) if (wcr & WCR_MAGICST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) dev_info(db->dev, "wake by magic packet\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) if (!(wcr & (WCR_LINKST | WCR_SAMPLEST | WCR_MAGICST)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) dev_err(db->dev, "wake signalled with no reason? "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) "NSR=0x%02x, WCR=0x%02x\n", nsr, wcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) spin_unlock_irqrestore(&db->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) return (nsr & NSR_WAKEST) ? IRQ_HANDLED : IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) #ifdef CONFIG_NET_POLL_CONTROLLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) * Used by netconsole
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) static void dm9000_poll_controller(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) disable_irq(dev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) dm9000_interrupt(dev->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) enable_irq(dev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) * Open the interface.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) * The interface is opened whenever "ifconfig" activates it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) dm9000_open(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) struct board_info *db = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) unsigned int irq_flags = irq_get_trigger_type(dev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) if (netif_msg_ifup(db))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) dev_dbg(db->dev, "enabling %s\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) /* If there is no IRQ type specified, tell the user that this is a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) * problem
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) if (irq_flags == IRQF_TRIGGER_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
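/* Reuse the trigger type the platform/DT configured for this line, and
 * mark the handler shareable in case other devices sit on the same
 * IRQ.
 */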
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) irq_flags |= IRQF_SHARED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) /* Power up the PHY via GPIO0 / GPR; reg 1Fh is not cleared by reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) mdelay(1); /* delay needed by DM9000B */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) /* Initialize DM9000 board */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) dm9000_init_dm9000(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) if (request_irq(dev->irq, dm9000_interrupt, irq_flags, dev->name, dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) /* Now that we have an interrupt handler hooked up we can unmask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) * our interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) dm9000_unmask_interrupts(db);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) /* Init driver variable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) db->dbug_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) mii_check_media(&db->mii, netif_msg_link(db), 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) netif_start_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) /* Poll initial link status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) schedule_delayed_work(&db->phy_poll, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) dm9000_shutdown(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) struct board_info *db = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) /* RESET device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) iow(db, DM9000_GPR, 0x01); /* Power-Down PHY */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) dm9000_mask_interrupts(db);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) iow(db, DM9000_RCR, 0x00); /* Disable RX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) * Stop the interface.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) * The interface is stopped when it is brought down.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) dm9000_stop(struct net_device *ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) struct board_info *db = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) if (netif_msg_ifdown(db))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) dev_dbg(db->dev, "shutting down %s\n", ndev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) cancel_delayed_work_sync(&db->phy_poll);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) netif_stop_queue(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) netif_carrier_off(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) /* free interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) free_irq(ndev->irq, ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) dm9000_shutdown(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) static const struct net_device_ops dm9000_netdev_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) .ndo_open = dm9000_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) .ndo_stop = dm9000_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) .ndo_start_xmit = dm9000_start_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) .ndo_tx_timeout = dm9000_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) .ndo_set_rx_mode = dm9000_hash_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) .ndo_do_ioctl = dm9000_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) .ndo_set_features = dm9000_set_features,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) .ndo_validate_addr = eth_validate_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) .ndo_set_mac_address = eth_mac_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) #ifdef CONFIG_NET_POLL_CONTROLLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) .ndo_poll_controller = dm9000_poll_controller,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) struct dm9000_plat_data *pdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) struct device_node *np = dev->of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) const void *mac_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) if (!IS_ENABLED(CONFIG_OF) || !np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) return ERR_PTR(-ENXIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) if (!pdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) if (of_find_property(np, "davicom,ext-phy", NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) pdata->flags |= DM9000_PLATF_EXT_PHY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) if (of_find_property(np, "davicom,no-eeprom", NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) pdata->flags |= DM9000_PLATF_NO_EEPROM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)
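/* of_get_mac_address() may also return -EPROBE_DEFER (for example when
 * the address comes from an NVMEM cell that is not ready yet); that
 * error is propagated so the probe is retried later.
 */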
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) mac_addr = of_get_mac_address(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) if (!IS_ERR(mac_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) ether_addr_copy(pdata->dev_addr, mac_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) else if (PTR_ERR(mac_addr) == -EPROBE_DEFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) return ERR_CAST(mac_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) return pdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) * Search for the DM9000 board, allocate space and register it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) dm9000_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) struct dm9000_plat_data *pdata = dev_get_platdata(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) struct board_info *db; /* Pointer to the board information structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) struct net_device *ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) const unsigned char *mac_src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) int iosize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) u32 id_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) int reset_gpios;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) enum of_gpio_flags flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) struct regulator *power;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) bool inv_mac_addr = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) power = devm_regulator_get(dev, "vcc");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) if (IS_ERR(power)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) if (PTR_ERR(power) == -EPROBE_DEFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) return -EPROBE_DEFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) dev_dbg(dev, "no regulator provided\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) ret = regulator_enable(power);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) if (ret != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) dev_err(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) "Failed to enable power regulator: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) dev_dbg(dev, "regulator enabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) reset_gpios = of_get_named_gpio_flags(dev->of_node, "reset-gpios", 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) if (gpio_is_valid(reset_gpios)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) ret = devm_gpio_request_one(dev, reset_gpios, flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) "dm9000_reset");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) dev_err(dev, "failed to request reset gpio %d: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) reset_gpios, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) goto out_regulator_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) /* According to the manual, the PWRST# low period must be at least 1 ms */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) msleep(2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) gpio_set_value(reset_gpios, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) /* The chip needs 3 ms to read the EEPROM once PWRST is deasserted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) msleep(4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) if (!pdata) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) pdata = dm9000_parse_dt(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) if (IS_ERR(pdata)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) ret = PTR_ERR(pdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) goto out_regulator_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) /* Init network device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) ndev = alloc_etherdev(sizeof(struct board_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) if (!ndev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) goto out_regulator_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) SET_NETDEV_DEV(ndev, &pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) dev_dbg(&pdev->dev, "dm9000_probe()\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) /* setup board info structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) db = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) db->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) db->ndev = ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) if (!IS_ERR(power))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) db->power_supply = power;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) spin_lock_init(&db->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) mutex_init(&db->addr_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) INIT_DELAYED_WORK(&db->phy_poll, dm9000_poll_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) if (!db->addr_res || !db->data_res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) dev_err(db->dev, "insufficient resources addr=%p data=%p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) db->addr_res, db->data_res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) ret = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) ndev->irq = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) if (ndev->irq < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) ret = ndev->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) db->irq_wake = platform_get_irq_optional(pdev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) if (db->irq_wake >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) ret = request_irq(db->irq_wake, dm9000_wol_interrupt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) IRQF_SHARED, dev_name(db->dev), ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) dev_err(db->dev, "cannot get wakeup irq (%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) /* test to see if irq is really wakeup capable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) ret = irq_set_irq_wake(db->irq_wake, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) dev_err(db->dev, "irq %d cannot set wakeup (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) db->irq_wake, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) irq_set_irq_wake(db->irq_wake, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) db->wake_supported = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) iosize = resource_size(db->addr_res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) db->addr_req = request_mem_region(db->addr_res->start, iosize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) pdev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) if (db->addr_req == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) dev_err(db->dev, "cannot claim address reg area\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) db->io_addr = ioremap(db->addr_res->start, iosize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) if (db->io_addr == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) dev_err(db->dev, "failed to ioremap address reg\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) iosize = resource_size(db->data_res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) db->data_req = request_mem_region(db->data_res->start, iosize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) pdev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) if (db->data_req == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) dev_err(db->dev, "cannot claim data reg area\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) db->io_data = ioremap(db->data_res->start, iosize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) if (db->io_data == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) dev_err(db->dev, "failed to ioremap data reg\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) /* fill in parameters for net-dev structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) ndev->base_addr = (unsigned long)db->io_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) /* ensure at least we have a default set of IO routines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) dm9000_set_io(db, iosize);
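/* dm9000_set_io() picks the 8/16/32-bit accessors from the byte width
 * passed in; here that is the size of the data-register window, and
 * the platform-data flags checked below may still override the choice.
 */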
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) /* check to see if anything is being over-ridden */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) if (pdata != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) /* check to see if the driver wants to over-ride the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) * default IO width */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) if (pdata->flags & DM9000_PLATF_8BITONLY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) dm9000_set_io(db, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) if (pdata->flags & DM9000_PLATF_16BITONLY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) dm9000_set_io(db, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) if (pdata->flags & DM9000_PLATF_32BITONLY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) dm9000_set_io(db, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) /* check to see if there are any IO routine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) * over-rides */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) if (pdata->inblk != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) db->inblk = pdata->inblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) if (pdata->outblk != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) db->outblk = pdata->outblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) if (pdata->dumpblk != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) db->dumpblk = pdata->dumpblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) db->flags = pdata->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) #ifdef CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) db->flags |= DM9000_PLATF_SIMPLE_PHY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) dm9000_reset(db);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) /* try multiple times, DM9000 sometimes gets the read wrong */
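/* The ID word is assembled LSB first: vendor ID in the low 16 bits and
 * product ID in the high 16 bits, compared against DM9000_ID.
 */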
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) for (i = 0; i < 8; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) id_val = ior(db, DM9000_VIDL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) id_val |= (u32)ior(db, DM9000_VIDH) << 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) id_val |= (u32)ior(db, DM9000_PIDL) << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) id_val |= (u32)ior(db, DM9000_PIDH) << 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) if (id_val == DM9000_ID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) dev_err(db->dev, "read wrong id 0x%08x\n", id_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) if (id_val != DM9000_ID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) dev_err(db->dev, "wrong id: 0x%08x\n", id_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) /* Identify what type of DM9000 we are working on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) id_val = ior(db, DM9000_CHIPR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) dev_dbg(db->dev, "dm9000 revision 0x%02x\n", id_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) switch (id_val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) case CHIPR_DM9000A:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) db->type = TYPE_DM9000A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) case CHIPR_DM9000B:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) db->type = TYPE_DM9000B;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) dev_dbg(db->dev, "ID %02x => defaulting to DM9000E\n", id_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) db->type = TYPE_DM9000E;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) /* dm9000a/b are capable of hardware checksum offload */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) if (db->type == TYPE_DM9000A || db->type == TYPE_DM9000B) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) ndev->features |= ndev->hw_features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) /* from this point we assume that we have found a DM9000 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) ndev->netdev_ops = &dm9000_netdev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) ndev->ethtool_ops = &dm9000_ethtool_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) db->msg_enable = NETIF_MSG_LINK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) db->mii.phy_id_mask = 0x1f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) db->mii.reg_num_mask = 0x1f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) db->mii.force_media = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) db->mii.full_duplex = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) db->mii.dev = ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) db->mii.mdio_read = dm9000_phy_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) db->mii.mdio_write = dm9000_phy_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670)
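	/* Determine the MAC address, trying each source in turn: the
	 * attached EEPROM, platform data, the chip's own address registers
	 * (PAR), and finally a random address if none of those is valid.
	 */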
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) mac_src = "eeprom";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) /* try reading the node address from the attached EEPROM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) for (i = 0; i < 6; i += 2)
		dm9000_read_eeprom(db, i / 2, ndev->dev_addr + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) mac_src = "platform data";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) memcpy(ndev->dev_addr, pdata->dev_addr, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) if (!is_valid_ether_addr(ndev->dev_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) /* try reading from mac */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) mac_src = "chip";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) for (i = 0; i < 6; i++)
			ndev->dev_addr[i] = ior(db, i + DM9000_PAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) if (!is_valid_ether_addr(ndev->dev_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) inv_mac_addr = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) eth_hw_addr_random(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) mac_src = "random";
	}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) platform_set_drvdata(pdev, ndev);
	ret = register_netdev(ndev);
	if (ret)
		goto out;

	if (inv_mac_addr)
		dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please set using ip\n",
			 ndev->name);

	printk(KERN_INFO "%s: dm9000%c at %p,%p IRQ %d MAC: %pM (%s)\n",
	       ndev->name, dm9000_type_to_char(db->type),
	       db->io_addr, db->io_data, ndev->irq,
	       ndev->dev_addr, mac_src);
	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) dev_err(db->dev, "not found (%d).\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) dm9000_release_board(pdev, db);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) free_netdev(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) out_regulator_disable:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) if (!IS_ERR(power))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) regulator_disable(power);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723)
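/* System-sleep suspend hook: mark the adapter as suspended and, if the
 * interface is up, detach it from the stack and (unless Wake-on-LAN is
 * armed) power the chip down.
 */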
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) dm9000_drv_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) struct net_device *ndev = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) struct board_info *db;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) if (ndev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) db = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) db->in_suspend = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) if (!netif_running(ndev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) netif_device_detach(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) /* only shutdown if not using WoL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) if (!db->wake_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) dm9000_shutdown(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745)
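/* System-sleep resume hook: if the interface was up and we were not relying
 * on Wake-on-LAN (so the chip may have lost power), re-initialise it and
 * unmask its interrupts, then re-attach it to the stack.
 */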
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) dm9000_drv_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) struct net_device *ndev = dev_get_drvdata(dev);
	struct board_info *db;

	if (ndev) {
		db = netdev_priv(ndev);

		if (netif_running(ndev)) {
			/* Reset if we were not in wake mode, to ensure the
			 * device is in a known state even if it lost power
			 * while suspended.
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) if (!db->wake_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) dm9000_init_dm9000(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) dm9000_unmask_interrupts(db);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) netif_device_attach(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) db->in_suspend = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) static const struct dev_pm_ops dm9000_drv_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) .suspend = dm9000_drv_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) .resume = dm9000_drv_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773)
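/* Unbind handler: unregister the netdev, release the board resources and
 * the regulator, and free the net_device last, since the driver's
 * board_info state is embedded in it.
 */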
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) dm9000_drv_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) struct net_device *ndev = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) struct board_info *dm = to_dm9000_board(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) unregister_netdev(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) dm9000_release_board(pdev, dm);
	if (dm->power_supply)
		regulator_disable(dm->power_supply);

	free_netdev(ndev);	/* frees ndev and the board_info embedded in it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) dev_dbg(&pdev->dev, "released and freed device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) #ifdef CONFIG_OF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) static const struct of_device_id dm9000_of_matches[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) { .compatible = "davicom,dm9000", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) { /* sentinel */ }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) MODULE_DEVICE_TABLE(of, dm9000_of_matches);
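
/*
 * For reference, a device-tree node of roughly this shape binds a platform
 * device to the table above. The values are purely illustrative (modelled on
 * the davicom,dm9000 binding); the addresses, interrupt specifier and MAC
 * address depend on the board. The first reg entry is the address port, the
 * second the data port:
 *
 *	ethernet@18000000 {
 *		compatible = "davicom,dm9000";
 *		reg = <0x18000000 0x2>, <0x18000004 0x2>;
 *		interrupt-parent = <&gpn>;
 *		interrupts = <7 4>;
 *		local-mac-address = [00 00 de ad be ef];
 *	};
 */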
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) static struct platform_driver dm9000_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) .name = "dm9000",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) .pm = &dm9000_drv_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) .of_match_table = of_match_ptr(dm9000_of_matches),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) .probe = dm9000_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) .remove = dm9000_drv_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807)
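/* module_platform_driver() expands to the usual module_init()/module_exit()
 * pair that registers and unregisters dm9000_driver with the platform bus.
 */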
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) module_platform_driver(dm9000_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) MODULE_AUTHOR("Sascha Hauer, Ben Dooks");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) MODULE_DESCRIPTION("Davicom DM9000 network driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) MODULE_ALIAS("platform:dm9000");