// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Microchip ENCX24J600 ethernet driver
 *
 * Copyright (C) 2015 Gridpoint
 * Author: Jon Ringle <jringle@gridpoint.com>
 */

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/regmap.h>
#include <linux/skbuff.h>
#include <linux/spi/spi.h>

#include "encx24j600_hw.h"

#define DRV_NAME	"encx24j600"
#define DRV_VERSION	"1.0"

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0000);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
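
/* Usage sketch (an assumption, not taken from this file): the debug level is
 * expected to be turned into the netif message bitmap via netif_msg_init() at
 * probe time, so loading with e.g. "modprobe encx24j600 debug=16" should
 * enable all message classes, while the default of -1 keeps
 * DEFAULT_MSG_ENABLE.
 */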

/* SRAM memory layout:
 *
 * 0x0000-0x05ff TX buffers 1.5KB (1*1536) reside in the GP area in SRAM
 * 0x0600-0x5fff RX buffers 22.5KB (15*1536) reside in the RX area in SRAM
 */
#define ENC_TX_BUF_START 0x0000U
#define ENC_RX_BUF_START 0x0600U
#define ENC_RX_BUF_END   0x5fffU
#define ENC_SRAM_SIZE    0x6000U

enum {
	RXFILTER_NORMAL,
	RXFILTER_MULTI,
	RXFILTER_PROMISC
};

struct encx24j600_priv {
	struct net_device *ndev;
	struct mutex lock; /* device access lock */
	struct encx24j600_context ctx;
	struct sk_buff *tx_skb;
	struct task_struct *kworker_task;
	struct kthread_worker kworker;
	struct kthread_work tx_work;
	struct kthread_work setrx_work;
	u16 next_packet;
	bool hw_enabled;
	bool full_duplex;
	bool autoneg;
	u16 speed;
	int rxfilter;
	u32 msg_enable;
};

static void dump_packet(const char *msg, int len, const char *data)
{
	pr_debug(DRV_NAME ": %s - packet len:%d\n", msg, len);
	print_hex_dump_bytes("pk data: ", DUMP_PREFIX_OFFSET, data, len);
}

static void encx24j600_dump_rsv(struct encx24j600_priv *priv, const char *msg,
				struct rsv *rsv)
{
	struct net_device *dev = priv->ndev;

	netdev_info(dev, "RX packet Len:%d\n", rsv->len);
	netdev_dbg(dev, "%s - NextPk: 0x%04x\n", msg,
		   rsv->next_packet);
	netdev_dbg(dev, "RxOK: %d, DribbleNibble: %d\n",
		   RSV_GETBIT(rsv->rxstat, RSV_RXOK),
		   RSV_GETBIT(rsv->rxstat, RSV_DRIBBLENIBBLE));
	netdev_dbg(dev, "CRCErr:%d, LenChkErr: %d, LenOutOfRange: %d\n",
		   RSV_GETBIT(rsv->rxstat, RSV_CRCERROR),
		   RSV_GETBIT(rsv->rxstat, RSV_LENCHECKERR),
		   RSV_GETBIT(rsv->rxstat, RSV_LENOUTOFRANGE));
	netdev_dbg(dev, "Multicast: %d, Broadcast: %d, LongDropEvent: %d, CarrierEvent: %d\n",
		   RSV_GETBIT(rsv->rxstat, RSV_RXMULTICAST),
		   RSV_GETBIT(rsv->rxstat, RSV_RXBROADCAST),
		   RSV_GETBIT(rsv->rxstat, RSV_RXLONGEVDROPEV),
		   RSV_GETBIT(rsv->rxstat, RSV_CARRIEREV));
	netdev_dbg(dev, "ControlFrame: %d, PauseFrame: %d, UnknownOp: %d, VLanTagFrame: %d\n",
		   RSV_GETBIT(rsv->rxstat, RSV_RXCONTROLFRAME),
		   RSV_GETBIT(rsv->rxstat, RSV_RXPAUSEFRAME),
		   RSV_GETBIT(rsv->rxstat, RSV_RXUNKNOWNOPCODE),
		   RSV_GETBIT(rsv->rxstat, RSV_RXTYPEVLAN));
}

static u16 encx24j600_read_reg(struct encx24j600_priv *priv, u8 reg)
{
	struct net_device *dev = priv->ndev;
	unsigned int val = 0;
	int ret = regmap_read(priv->ctx.regmap, reg, &val);

	if (unlikely(ret))
		netif_err(priv, drv, dev, "%s: error %d reading reg %02x\n",
			  __func__, ret, reg);
	return val;
}

static void encx24j600_write_reg(struct encx24j600_priv *priv, u8 reg, u16 val)
{
	struct net_device *dev = priv->ndev;
	int ret = regmap_write(priv->ctx.regmap, reg, val);

	if (unlikely(ret))
		netif_err(priv, drv, dev, "%s: error %d writing reg %02x=%04x\n",
			  __func__, ret, reg, val);
}

static void encx24j600_update_reg(struct encx24j600_priv *priv, u8 reg,
				  u16 mask, u16 val)
{
	struct net_device *dev = priv->ndev;
	int ret = regmap_update_bits(priv->ctx.regmap, reg, mask, val);

	if (unlikely(ret))
		netif_err(priv, drv, dev, "%s: error %d updating reg %02x=%04x~%04x\n",
			  __func__, ret, reg, val, mask);
}

static u16 encx24j600_read_phy(struct encx24j600_priv *priv, u8 reg)
{
	struct net_device *dev = priv->ndev;
	unsigned int val = 0;
	int ret = regmap_read(priv->ctx.phymap, reg, &val);

	if (unlikely(ret))
		netif_err(priv, drv, dev, "%s: error %d reading %02x\n",
			  __func__, ret, reg);
	return val;
}

static void encx24j600_write_phy(struct encx24j600_priv *priv, u8 reg, u16 val)
{
	struct net_device *dev = priv->ndev;
	int ret = regmap_write(priv->ctx.phymap, reg, val);

	if (unlikely(ret))
		netif_err(priv, drv, dev, "%s: error %d writing reg %02x=%04x\n",
			  __func__, ret, reg, val);
}

static void encx24j600_clr_bits(struct encx24j600_priv *priv, u8 reg, u16 mask)
{
	encx24j600_update_reg(priv, reg, mask, 0);
}

static void encx24j600_set_bits(struct encx24j600_priv *priv, u8 reg, u16 mask)
{
	encx24j600_update_reg(priv, reg, mask, mask);
}

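/* Issue one of the chip's single-byte SPI commands (SETETHRST, ENABLERX,
 * SETPKTDEC, ...). The command opcode is passed as the "register" address
 * with a dummy value; the companion regmap bus (encx24j600-regmap.c) is
 * assumed to translate this into the corresponding one-byte transfer.
 */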
static void encx24j600_cmd(struct encx24j600_priv *priv, u8 cmd)
{
	struct net_device *dev = priv->ndev;
	int ret = regmap_write(priv->ctx.regmap, cmd, 0);

	if (unlikely(ret))
		netif_err(priv, drv, dev, "%s: error %d with cmd %02x\n",
			  __func__, ret, cmd);
}

static int encx24j600_raw_read(struct encx24j600_priv *priv, u8 reg, u8 *data,
			       size_t count)
{
	int ret;

	mutex_lock(&priv->ctx.mutex);
	ret = regmap_encx24j600_spi_read(&priv->ctx, reg, data, count);
	mutex_unlock(&priv->ctx.mutex);

	return ret;
}

static int encx24j600_raw_write(struct encx24j600_priv *priv, u8 reg,
				const u8 *data, size_t count)
{
	int ret;

	mutex_lock(&priv->ctx.mutex);
	ret = regmap_encx24j600_spi_write(&priv->ctx, reg, data, count);
	mutex_unlock(&priv->ctx.mutex);

	return ret;
}

static void encx24j600_update_phcon1(struct encx24j600_priv *priv)
{
	u16 phcon1 = encx24j600_read_phy(priv, PHCON1);

	if (priv->autoneg == AUTONEG_ENABLE) {
		phcon1 |= ANEN | RENEG;
	} else {
		phcon1 &= ~ANEN;
		if (priv->speed == SPEED_100)
			phcon1 |= SPD100;
		else
			phcon1 &= ~SPD100;

		if (priv->full_duplex)
			phcon1 |= PFULDPX;
		else
			phcon1 &= ~PFULDPX;
	}
	encx24j600_write_phy(priv, PHCON1, phcon1);
}

/* Waits for autonegotiation to complete. */
static int encx24j600_wait_for_autoneg(struct encx24j600_priv *priv)
{
	struct net_device *dev = priv->ndev;
	unsigned long timeout = jiffies + msecs_to_jiffies(2000);
	u16 phstat1;
	u16 estat;
	int ret = 0;

	phstat1 = encx24j600_read_phy(priv, PHSTAT1);
	while ((phstat1 & ANDONE) == 0) {
		if (time_after(jiffies, timeout)) {
			u16 phstat3;

			netif_notice(priv, drv, dev, "timeout waiting for autoneg done\n");

			priv->autoneg = AUTONEG_DISABLE;
			phstat3 = encx24j600_read_phy(priv, PHSTAT3);
			priv->speed = (phstat3 & PHY3SPD100)
				      ? SPEED_100 : SPEED_10;
			priv->full_duplex = (phstat3 & PHY3DPX) ? 1 : 0;
			encx24j600_update_phcon1(priv);
			netif_notice(priv, drv, dev, "Using parallel detection: %s/%s",
				     priv->speed == SPEED_100 ? "100" : "10",
				     priv->full_duplex ? "Full" : "Half");

			return -ETIMEDOUT;
		}
		cpu_relax();
		phstat1 = encx24j600_read_phy(priv, PHSTAT1);
	}

	estat = encx24j600_read_reg(priv, ESTAT);
	if (estat & PHYDPX) {
		encx24j600_set_bits(priv, MACON2, FULDPX);
		encx24j600_write_reg(priv, MABBIPG, 0x15);
	} else {
		encx24j600_clr_bits(priv, MACON2, FULDPX);
		encx24j600_write_reg(priv, MABBIPG, 0x12);
		/* Maximum retransmission attempts */
		encx24j600_write_reg(priv, MACLCON, 0x370f);
	}

	return ret;
}

/* Access the PHY to determine link status */
static void encx24j600_check_link_status(struct encx24j600_priv *priv)
{
	struct net_device *dev = priv->ndev;
	u16 estat;

	estat = encx24j600_read_reg(priv, ESTAT);

	if (estat & PHYLNK) {
		if (priv->autoneg == AUTONEG_ENABLE)
			encx24j600_wait_for_autoneg(priv);

		netif_carrier_on(dev);
		netif_info(priv, ifup, dev, "link up\n");
	} else {
		netif_info(priv, ifdown, dev, "link down\n");

		/* Re-enable autoneg since we won't know what we might be
		 * connected to when the link is brought back up again.
		 */
		priv->autoneg = AUTONEG_ENABLE;
		priv->full_duplex = true;
		priv->speed = SPEED_100;
		netif_carrier_off(dev);
	}
}

static void encx24j600_int_link_handler(struct encx24j600_priv *priv)
{
	struct net_device *dev = priv->ndev;

	netif_dbg(priv, intr, dev, "%s", __func__);
	encx24j600_check_link_status(priv);
	encx24j600_clr_bits(priv, EIR, LINKIF);
}

static void encx24j600_tx_complete(struct encx24j600_priv *priv, bool err)
{
	struct net_device *dev = priv->ndev;

	if (!priv->tx_skb) {
		BUG();
		return;
	}

	mutex_lock(&priv->lock);

	if (err)
		dev->stats.tx_errors++;
	else
		dev->stats.tx_packets++;

	dev->stats.tx_bytes += priv->tx_skb->len;

	encx24j600_clr_bits(priv, EIR, TXIF | TXABTIF);

	netif_dbg(priv, tx_done, dev, "TX Done%s\n", err ? ": Err" : "");

	dev_kfree_skb(priv->tx_skb);
	priv->tx_skb = NULL;

	netif_wake_queue(dev);

	mutex_unlock(&priv->lock);
}

static int encx24j600_receive_packet(struct encx24j600_priv *priv,
				     struct rsv *rsv)
{
	struct net_device *dev = priv->ndev;
	struct sk_buff *skb = netdev_alloc_skb(dev, rsv->len + NET_IP_ALIGN);

	if (!skb) {
		pr_err_ratelimited("RX: OOM: packet dropped\n");
		dev->stats.rx_dropped++;
		return -ENOMEM;
	}
	skb_reserve(skb, NET_IP_ALIGN);
	encx24j600_raw_read(priv, RRXDATA, skb_put(skb, rsv->len), rsv->len);

	if (netif_msg_pktdata(priv))
		dump_packet("RX", skb->len, skb->data);

	skb->dev = dev;
	skb->protocol = eth_type_trans(skb, dev);
	skb->ip_summed = CHECKSUM_COMPLETE;

	/* Maintain stats */
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += rsv->len;

	netif_rx(skb);

	return 0;
}

static void encx24j600_rx_packets(struct encx24j600_priv *priv, u8 packet_count)
{
	struct net_device *dev = priv->ndev;

	while (packet_count--) {
		struct rsv rsv;
		u16 newrxtail;

		encx24j600_write_reg(priv, ERXRDPT, priv->next_packet);
		encx24j600_raw_read(priv, RRXDATA, (u8 *)&rsv, sizeof(rsv));

		if (netif_msg_rx_status(priv))
			encx24j600_dump_rsv(priv, __func__, &rsv);

		if (!RSV_GETBIT(rsv.rxstat, RSV_RXOK) ||
		    (rsv.len > MAX_FRAMELEN)) {
			netif_err(priv, rx_err, dev, "RX Error %04x\n",
				  rsv.rxstat);
			dev->stats.rx_errors++;

			if (RSV_GETBIT(rsv.rxstat, RSV_CRCERROR))
				dev->stats.rx_crc_errors++;
			if (RSV_GETBIT(rsv.rxstat, RSV_LENCHECKERR))
				dev->stats.rx_frame_errors++;
			if (rsv.len > MAX_FRAMELEN)
				dev->stats.rx_over_errors++;
		} else {
			encx24j600_receive_packet(priv, &rsv);
		}

		priv->next_packet = rsv.next_packet;

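		/* Release the space used by this frame: SETPKTDEC decrements
		 * the hardware packet counter, and ERXTAIL is kept two bytes
		 * behind the next packet pointer (wrapping within the RX
		 * area) so the controller does not overwrite unread data.
		 */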
		newrxtail = priv->next_packet - 2;
		if (newrxtail == ENC_RX_BUF_START)
			newrxtail = SRAM_SIZE - 2;

		encx24j600_cmd(priv, SETPKTDEC);
		encx24j600_write_reg(priv, ERXTAIL, newrxtail);
	}
}

static irqreturn_t encx24j600_isr(int irq, void *dev_id)
{
	struct encx24j600_priv *priv = dev_id;
	struct net_device *dev = priv->ndev;
	int eir;

	/* Mask chip interrupts (clear INTIE) while servicing */
	encx24j600_cmd(priv, CLREIE);

	eir = encx24j600_read_reg(priv, EIR);

	if (eir & LINKIF)
		encx24j600_int_link_handler(priv);

	if (eir & TXIF)
		encx24j600_tx_complete(priv, false);

	if (eir & TXABTIF)
		encx24j600_tx_complete(priv, true);

	if (eir & RXABTIF) {
		if (eir & PCFULIF) {
			/* Packet counter is full */
			netif_err(priv, rx_err, dev, "Packet counter full\n");
		}
		dev->stats.rx_dropped++;
		encx24j600_clr_bits(priv, EIR, RXABTIF);
	}

	if (eir & PKTIF) {
		u8 packet_count;

		mutex_lock(&priv->lock);

		packet_count = encx24j600_read_reg(priv, ESTAT) & 0xff;
		while (packet_count) {
			encx24j600_rx_packets(priv, packet_count);
			packet_count = encx24j600_read_reg(priv, ESTAT) & 0xff;
		}

		mutex_unlock(&priv->lock);
	}

	/* Re-enable chip interrupts (set INTIE) */
	encx24j600_cmd(priv, SETEIE);

	return IRQ_HANDLED;
}

static int encx24j600_soft_reset(struct encx24j600_priv *priv)
{
	int ret = 0;
	int timeout;
	u16 eudast;

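	/* The sequence below follows the datasheet reset procedure: verify
	 * SPI communication by writing and reading back EUDAST, wait for
	 * CLKRDY, issue a system reset with SETETHRST, then confirm that
	 * EUDAST has returned to its reset value of 0000h.
	 */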
	/* Write and verify a test value to EUDAST */
	regcache_cache_bypass(priv->ctx.regmap, true);
	timeout = 10;
	do {
		encx24j600_write_reg(priv, EUDAST, EUDAST_TEST_VAL);
		eudast = encx24j600_read_reg(priv, EUDAST);
		usleep_range(25, 100);
	} while ((eudast != EUDAST_TEST_VAL) && --timeout);
	regcache_cache_bypass(priv->ctx.regmap, false);

	if (timeout == 0) {
		ret = -ETIMEDOUT;
		goto err_out;
	}

	/* Wait for CLKRDY to become set */
	timeout = 10;
	while (!(encx24j600_read_reg(priv, ESTAT) & CLKRDY) && --timeout)
		usleep_range(25, 100);

	if (timeout == 0) {
		ret = -ETIMEDOUT;
		goto err_out;
	}

	/* Issue a System Reset command */
	encx24j600_cmd(priv, SETETHRST);
	usleep_range(25, 100);

	/* Confirm that EUDAST has 0000h after system reset */
	if (encx24j600_read_reg(priv, EUDAST) != 0) {
		ret = -EINVAL;
		goto err_out;
	}

	/* Wait for PHY register and status bits to become available */
	usleep_range(256, 1000);

err_out:
	return ret;
}

static int encx24j600_hw_reset(struct encx24j600_priv *priv)
{
	int ret;

	mutex_lock(&priv->lock);
	ret = encx24j600_soft_reset(priv);
	mutex_unlock(&priv->lock);

	return ret;
}

static void encx24j600_reset_hw_tx(struct encx24j600_priv *priv)
{
	encx24j600_set_bits(priv, ECON2, TXRST);
	encx24j600_clr_bits(priv, ECON2, TXRST);
}

static void encx24j600_hw_init_tx(struct encx24j600_priv *priv)
{
	/* Reset TX */
	encx24j600_reset_hw_tx(priv);

	/* Clear the TXIF flag if it was previously set */
	encx24j600_clr_bits(priv, EIR, TXIF | TXABTIF);

	/* Write the Tx Buffer pointer */
	encx24j600_write_reg(priv, EGPWRPT, ENC_TX_BUF_START);
}

static void encx24j600_hw_init_rx(struct encx24j600_priv *priv)
{
	encx24j600_cmd(priv, DISABLERX);

	/* Set up RX packet start address in the SRAM */
	encx24j600_write_reg(priv, ERXST, ENC_RX_BUF_START);

	/* Preload the RX Data pointer to the beginning of the RX area */
	encx24j600_write_reg(priv, ERXRDPT, ENC_RX_BUF_START);

	priv->next_packet = ENC_RX_BUF_START;

	/* Set up RX end address in the SRAM */
	encx24j600_write_reg(priv, ERXTAIL, ENC_SRAM_SIZE - 2);

	/* Reset the user data pointers */
	encx24j600_write_reg(priv, EUDAST, ENC_SRAM_SIZE);
	encx24j600_write_reg(priv, EUDAND, ENC_SRAM_SIZE + 1);

	/* Set Max Frame length */
	encx24j600_write_reg(priv, MAMXFL, MAX_FRAMELEN);
}

static void encx24j600_dump_config(struct encx24j600_priv *priv,
				   const char *msg)
{
	pr_info(DRV_NAME ": %s\n", msg);

	/* CHIP configuration */
	pr_info(DRV_NAME " ECON1: %04X\n", encx24j600_read_reg(priv, ECON1));
	pr_info(DRV_NAME " ECON2: %04X\n", encx24j600_read_reg(priv, ECON2));
	pr_info(DRV_NAME " ERXFCON: %04X\n", encx24j600_read_reg(priv,
								 ERXFCON));
	pr_info(DRV_NAME " ESTAT: %04X\n", encx24j600_read_reg(priv, ESTAT));
	pr_info(DRV_NAME " EIR: %04X\n", encx24j600_read_reg(priv, EIR));
	pr_info(DRV_NAME " EIDLED: %04X\n", encx24j600_read_reg(priv, EIDLED));

	/* MAC layer configuration */
	pr_info(DRV_NAME " MACON1: %04X\n", encx24j600_read_reg(priv, MACON1));
	pr_info(DRV_NAME " MACON2: %04X\n", encx24j600_read_reg(priv, MACON2));
	pr_info(DRV_NAME " MAIPG: %04X\n", encx24j600_read_reg(priv, MAIPG));
	pr_info(DRV_NAME " MACLCON: %04X\n", encx24j600_read_reg(priv,
								 MACLCON));
	pr_info(DRV_NAME " MABBIPG: %04X\n", encx24j600_read_reg(priv,
								 MABBIPG));

	/* PHY configuration */
	pr_info(DRV_NAME " PHCON1: %04X\n", encx24j600_read_phy(priv, PHCON1));
	pr_info(DRV_NAME " PHCON2: %04X\n", encx24j600_read_phy(priv, PHCON2));
	pr_info(DRV_NAME " PHANA: %04X\n", encx24j600_read_phy(priv, PHANA));
	pr_info(DRV_NAME " PHANLPA: %04X\n", encx24j600_read_phy(priv,
								 PHANLPA));
	pr_info(DRV_NAME " PHANE: %04X\n", encx24j600_read_phy(priv, PHANE));
	pr_info(DRV_NAME " PHSTAT1: %04X\n", encx24j600_read_phy(priv,
								 PHSTAT1));
	pr_info(DRV_NAME " PHSTAT2: %04X\n", encx24j600_read_phy(priv,
								 PHSTAT2));
	pr_info(DRV_NAME " PHSTAT3: %04X\n", encx24j600_read_phy(priv,
								 PHSTAT3));
}

static void encx24j600_set_rxfilter_mode(struct encx24j600_priv *priv)
{
	switch (priv->rxfilter) {
	case RXFILTER_PROMISC:
		encx24j600_set_bits(priv, MACON1, PASSALL);
		encx24j600_write_reg(priv, ERXFCON, UCEN | MCEN | NOTMEEN);
		break;
	case RXFILTER_MULTI:
		encx24j600_clr_bits(priv, MACON1, PASSALL);
		encx24j600_write_reg(priv, ERXFCON, UCEN | CRCEN | BCEN | MCEN);
		break;
	case RXFILTER_NORMAL:
	default:
		encx24j600_clr_bits(priv, MACON1, PASSALL);
		encx24j600_write_reg(priv, ERXFCON, UCEN | CRCEN | BCEN);
		break;
	}
}

static void encx24j600_hw_init(struct encx24j600_priv *priv)
{
	u16 macon2;

	priv->hw_enabled = false;

	/* PHY LEDs:
	 * LEDA: link state + collision events
	 * LEDB: link state + transmit/receive events
	 */
	encx24j600_update_reg(priv, EIDLED, 0xff00, 0xcb00);

	/* Loopback disabled */
	encx24j600_write_reg(priv, MACON1, 0x9);

	/* interpacket gap value */
	encx24j600_write_reg(priv, MAIPG, 0x0c12);

	/* Write the auto negotiation pattern */
	encx24j600_write_phy(priv, PHANA, PHANA_DEFAULT);

	encx24j600_update_phcon1(priv);
	encx24j600_check_link_status(priv);

	macon2 = MACON2_RSV1 | TXCRCEN | PADCFG0 | PADCFG2 | MACON2_DEFER;
	if ((priv->autoneg == AUTONEG_DISABLE) && priv->full_duplex)
		macon2 |= FULDPX;

	encx24j600_set_bits(priv, MACON2, macon2);

	priv->rxfilter = RXFILTER_NORMAL;
	encx24j600_set_rxfilter_mode(priv);

	/* Program the Maximum frame length */
	encx24j600_write_reg(priv, MAMXFL, MAX_FRAMELEN);

	/* Init Tx pointers */
	encx24j600_hw_init_tx(priv);

	/* Init Rx pointers */
	encx24j600_hw_init_rx(priv);

	if (netif_msg_hw(priv))
		encx24j600_dump_config(priv, "Hw is initialized");
}

static void encx24j600_hw_enable(struct encx24j600_priv *priv)
{
	/* Clear the interrupt flags in case any were set */
	encx24j600_clr_bits(priv, EIR, (PCFULIF | RXABTIF | TXABTIF | TXIF |
					PKTIF | LINKIF));

	/* Enable the interrupts */
	encx24j600_write_reg(priv, EIE, (PCFULIE | RXABTIE | TXABTIE | TXIE |
					 PKTIE | LINKIE | INTIE));

	/* Enable RX */
	encx24j600_cmd(priv, ENABLERX);

	priv->hw_enabled = true;
}

static void encx24j600_hw_disable(struct encx24j600_priv *priv)
{
	/* Disable all interrupts */
	encx24j600_write_reg(priv, EIE, 0);

	/* Disable RX */
	encx24j600_cmd(priv, DISABLERX);

	priv->hw_enabled = false;
}

static int encx24j600_setlink(struct net_device *dev, u8 autoneg, u16 speed,
			      u8 duplex)
{
	struct encx24j600_priv *priv = netdev_priv(dev);
	int ret = 0;

	if (!priv->hw_enabled) {
		/* link is in low power mode now; duplex setting
		 * will take effect on next encx24j600_hw_init()
		 */
		if (speed == SPEED_10 || speed == SPEED_100) {
			priv->autoneg = (autoneg == AUTONEG_ENABLE);
			priv->full_duplex = (duplex == DUPLEX_FULL);
			priv->speed = speed;
		} else {
			netif_warn(priv, link, dev, "unsupported link speed setting\n");
			/* speeds other than SPEED_10 and SPEED_100
			 * are not supported by the chip
			 */
			ret = -EOPNOTSUPP;
		}
	} else {
		netif_warn(priv, link, dev, "Warning: hw must be disabled to set link mode\n");
		ret = -EBUSY;
	}
	return ret;
}

static void encx24j600_hw_get_macaddr(struct encx24j600_priv *priv,
				      unsigned char *ethaddr)
{
	unsigned short val;

	val = encx24j600_read_reg(priv, MAADR1);

	ethaddr[0] = val & 0x00ff;
	ethaddr[1] = (val & 0xff00) >> 8;

	val = encx24j600_read_reg(priv, MAADR2);

	ethaddr[2] = val & 0x00ffU;
	ethaddr[3] = (val & 0xff00U) >> 8;

	val = encx24j600_read_reg(priv, MAADR3);

	ethaddr[4] = val & 0x00ffU;
	ethaddr[5] = (val & 0xff00U) >> 8;
}

/* Program the hardware MAC address from dev->dev_addr. */
static int encx24j600_set_hw_macaddr(struct net_device *dev)
{
	struct encx24j600_priv *priv = netdev_priv(dev);

	if (priv->hw_enabled) {
		netif_info(priv, drv, dev, "Hardware must be disabled to set MAC address\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) mutex_lock(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) netif_info(priv, drv, dev, "%s: Setting MAC address to %pM\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) dev->name, dev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) encx24j600_write_reg(priv, MAADR3, (dev->dev_addr[4] |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) dev->dev_addr[5] << 8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) encx24j600_write_reg(priv, MAADR2, (dev->dev_addr[2] |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) dev->dev_addr[3] << 8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) encx24j600_write_reg(priv, MAADR1, (dev->dev_addr[0] |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) dev->dev_addr[1] << 8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) mutex_unlock(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) /* Store the new hardware address in dev->dev_addr, and update the MAC.*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) static int encx24j600_set_mac_address(struct net_device *dev, void *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) struct sockaddr *address = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) if (netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) if (!is_valid_ether_addr(address->sa_data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) return -EADDRNOTAVAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) memcpy(dev->dev_addr, address->sa_data, dev->addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) return encx24j600_set_hw_macaddr(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) static int encx24j600_open(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) struct encx24j600_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) int ret = request_threaded_irq(priv->ctx.spi->irq, NULL, encx24j600_isr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) DRV_NAME, priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) if (unlikely(ret < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) netdev_err(dev, "request irq %d failed (ret = %d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) priv->ctx.spi->irq, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) encx24j600_hw_disable(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) encx24j600_hw_init(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) encx24j600_hw_enable(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) netif_start_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) static int encx24j600_stop(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) struct encx24j600_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) free_irq(priv->ctx.spi->irq, priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) static void encx24j600_setrx_proc(struct kthread_work *ws)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) struct encx24j600_priv *priv =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) container_of(ws, struct encx24j600_priv, setrx_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) mutex_lock(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) encx24j600_set_rxfilter_mode(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) mutex_unlock(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
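/* ndo_set_rx_mode: derive the RX filter mode from the interface flags and,
 * if it changed, queue setrx_work to program the chip from process context.
 */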
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) static void encx24j600_set_multicast_list(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) struct encx24j600_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) int oldfilter = priv->rxfilter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) if (dev->flags & IFF_PROMISC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) netif_dbg(priv, link, dev, "promiscuous mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) priv->rxfilter = RXFILTER_PROMISC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) } else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) netif_dbg(priv, link, dev, "%smulticast mode\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) (dev->flags & IFF_ALLMULTI) ? "all-" : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) priv->rxfilter = RXFILTER_MULTI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) netif_dbg(priv, link, dev, "normal mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) priv->rxfilter = RXFILTER_NORMAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) if (oldfilter != priv->rxfilter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) kthread_queue_work(&priv->kworker, &priv->setrx_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
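/* Push one frame to the chip: recover from a previously aborted transmit,
 * clear any stale TXIF, copy the frame into the TX (GP) SRAM area via the
 * general purpose write pointer, program ETXST/ETXLEN and set TXRTS to
 * start the transmission. Called with priv->lock held.
 */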
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) static void encx24j600_hw_tx(struct encx24j600_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) struct net_device *dev = priv->ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) netif_info(priv, tx_queued, dev, "TX Packet Len:%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) priv->tx_skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) if (netif_msg_pktdata(priv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) dump_packet("TX", priv->tx_skb->len, priv->tx_skb->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) if (encx24j600_read_reg(priv, EIR) & TXABTIF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) /* Last transmission aborted due to error. Reset TX interface */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) encx24j600_reset_hw_tx(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) /* Clear the TXIF flag if it was previously set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) encx24j600_clr_bits(priv, EIR, TXIF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) /* Set the data pointer to the TX buffer address in the SRAM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) encx24j600_write_reg(priv, EGPWRPT, ENC_TX_BUF_START);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) /* Copy the packet into the SRAM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) encx24j600_raw_write(priv, WGPDATA, (u8 *)priv->tx_skb->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) priv->tx_skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) /* Program the Tx buffer start pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) encx24j600_write_reg(priv, ETXST, ENC_TX_BUF_START);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) /* Program the packet length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) encx24j600_write_reg(priv, ETXLEN, priv->tx_skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) /* Start the transmission */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) encx24j600_cmd(priv, SETTXRTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
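/* Deferred transmit, run on the kthread worker: hand the queued skb to the
 * hardware while holding priv->lock to serialize register access.
 */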
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) static void encx24j600_tx_proc(struct kthread_work *ws)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) struct encx24j600_priv *priv =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) container_of(ws, struct encx24j600_priv, tx_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) mutex_lock(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) encx24j600_hw_tx(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) mutex_unlock(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
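/* ndo_start_xmit: the SPI transfers needed to load the frame can sleep, so
 * stop the queue (only one skb is in flight at a time), remember the skb
 * and defer the actual transfer to tx_work on the kthread worker.
 */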
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) static netdev_tx_t encx24j600_tx(struct sk_buff *skb, struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) struct encx24j600_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) /* save the timestamp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) netif_trans_update(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) /* Remember the skb for deferred processing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) priv->tx_skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) kthread_queue_work(&priv->kworker, &priv->tx_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) /* Deal with a transmit timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) static void encx24j600_tx_timeout(struct net_device *dev, unsigned int txqueue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) struct encx24j600_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) netif_err(priv, tx_err, dev, "TX timeout at %ld, latency %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) jiffies, jiffies - dev_trans_start(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) dev->stats.tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
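/* ethtool register dump: report the dump size and read the SFR block
 * (even 16-bit register addresses) under the device lock; registers that
 * cannot be read are left as zero in the buffer.
 */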
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) static int encx24j600_get_regs_len(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) return SFR_REG_COUNT * sizeof(u16); /* one u16 slot per register address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) static void encx24j600_get_regs(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) struct ethtool_regs *regs, void *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) struct encx24j600_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) u16 *buff = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) u8 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) regs->version = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) mutex_lock(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) for (reg = 0; reg < SFR_REG_COUNT; reg += 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) unsigned int val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) /* ignore errors for unreadable registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) regmap_read(priv->ctx.regmap, reg, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) buff[reg] = val & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) mutex_unlock(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) static void encx24j600_get_drvinfo(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) struct ethtool_drvinfo *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) strlcpy(info->version, DRV_VERSION, sizeof(info->version));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) strlcpy(info->bus_info, dev_name(dev->dev.parent),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) sizeof(info->bus_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
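/* Report the cached link parameters; the ENCX24J600 is a 10/100BASE-T
 * part, so only those modes are advertised as supported.
 */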
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) static int encx24j600_get_link_ksettings(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) struct ethtool_link_ksettings *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) struct encx24j600_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) u32 supported;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) SUPPORTED_Autoneg | SUPPORTED_TP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) supported);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) cmd->base.speed = priv->speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) cmd->base.duplex = priv->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) cmd->base.port = PORT_TP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) cmd->base.autoneg = priv->autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) encx24j600_set_link_ksettings(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) const struct ethtool_link_ksettings *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) return encx24j600_setlink(dev, cmd->base.autoneg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) cmd->base.speed, cmd->base.duplex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) static u32 encx24j600_get_msglevel(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) struct encx24j600_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) return priv->msg_enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) static void encx24j600_set_msglevel(struct net_device *dev, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) struct encx24j600_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) priv->msg_enable = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) static const struct ethtool_ops encx24j600_ethtool_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) .get_drvinfo = encx24j600_get_drvinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) .get_msglevel = encx24j600_get_msglevel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) .set_msglevel = encx24j600_set_msglevel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) .get_regs_len = encx24j600_get_regs_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) .get_regs = encx24j600_get_regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) .get_link_ksettings = encx24j600_get_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) .set_link_ksettings = encx24j600_set_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) static const struct net_device_ops encx24j600_netdev_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) .ndo_open = encx24j600_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) .ndo_stop = encx24j600_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) .ndo_start_xmit = encx24j600_tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) .ndo_set_rx_mode = encx24j600_set_multicast_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) .ndo_set_mac_address = encx24j600_set_mac_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) .ndo_tx_timeout = encx24j600_tx_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) .ndo_validate_addr = eth_validate_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
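/* Probe: allocate the net_device, set up the SPI regmap, reset and
 * initialize the chip, start the kthread worker used for deferred TX and
 * RX-filter work, read the MAC address from the chip and register the
 * interface. The device ID in EIDLED is then verified so that a missing or
 * wrong chip fails the probe.
 */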
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) static int encx24j600_spi_probe(struct spi_device *spi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) struct net_device *ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) struct encx24j600_priv *priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) u16 eidled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) ndev = alloc_etherdev(sizeof(struct encx24j600_priv));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) if (!ndev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) goto error_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) spi_set_drvdata(spi, priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) dev_set_drvdata(&spi->dev, priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) SET_NETDEV_DEV(ndev, &spi->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) priv->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) priv->ndev = ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) /* Default PHY configuration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) priv->full_duplex = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) priv->autoneg = AUTONEG_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) priv->speed = SPEED_100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) priv->ctx.spi = spi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) ndev->irq = spi->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) ndev->netdev_ops = &encx24j600_netdev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) ret = devm_regmap_init_encx24j600(&spi->dev, &priv->ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) mutex_init(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) /* Reset device and check if it is connected */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) if (encx24j600_hw_reset(priv)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) netif_err(priv, probe, ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) DRV_NAME ": Chip is not detected\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) /* Initialize the device HW to a consistent state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) encx24j600_hw_init(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) kthread_init_worker(&priv->kworker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) kthread_init_work(&priv->tx_work, encx24j600_tx_proc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) kthread_init_work(&priv->setrx_work, encx24j600_setrx_proc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) priv->kworker_task = kthread_run(kthread_worker_fn, &priv->kworker,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) "encx24j600");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) if (IS_ERR(priv->kworker_task)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) ret = PTR_ERR(priv->kworker_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) /* Get the MAC address from the chip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) encx24j600_hw_get_macaddr(priv, ndev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) ndev->ethtool_ops = &encx24j600_ethtool_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) ret = register_netdev(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) if (unlikely(ret)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) netif_err(priv, probe, ndev, "Error %d initializing encx24j600 card\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) goto out_stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) eidled = encx24j600_read_reg(priv, EIDLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) if (((eidled & DEVID_MASK) >> DEVID_SHIFT) != ENCX24J600_DEV_ID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) goto out_unregister;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) netif_info(priv, probe, ndev, "Silicon rev ID: 0x%02x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) (eidled & REVID_MASK) >> REVID_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) netif_info(priv, drv, priv->ndev, "MAC address %pM\n", ndev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) out_unregister:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) unregister_netdev(priv->ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) out_stop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) kthread_stop(priv->kworker_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) free_netdev(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) error_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
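/* Remove: tear down in reverse order of probe - unregister the netdev,
 * stop the worker thread and free the net_device.
 */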
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) static int encx24j600_spi_remove(struct spi_device *spi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) struct encx24j600_priv *priv = dev_get_drvdata(&spi->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) unregister_netdev(priv->ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) kthread_stop(priv->kworker_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) free_netdev(priv->ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) static const struct spi_device_id encx24j600_spi_id_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) { .name = "encx24j600" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) { /* sentinel */ }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) MODULE_DEVICE_TABLE(spi, encx24j600_spi_id_table);
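/* The SPI core binds this driver to spi devices whose modalias is
 * "encx24j600", and MODULE_DEVICE_TABLE exports the table so the module
 * can be autoloaded. A minimal sketch of how a board file might
 * instantiate such a device (bus number, chip select, IRQ and
 * max_speed_hz below are hypothetical and board specific):
 *
 *	static struct spi_board_info encx_board_info __initdata = {
 *		.modalias	= "encx24j600",
 *		.max_speed_hz	= 12000000,
 *		.bus_num	= 0,
 *		.chip_select	= 0,
 *		.irq		= 42,
 *	};
 *	spi_register_board_info(&encx_board_info, 1);
 */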
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) static struct spi_driver encx24j600_spi_net_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) .name = DRV_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) .bus = &spi_bus_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) .probe = encx24j600_spi_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) .remove = encx24j600_spi_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) .id_table = encx24j600_spi_id_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) static int __init encx24j600_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) return spi_register_driver(&encx24j600_spi_net_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) module_init(encx24j600_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) static void __exit encx24j600_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) spi_unregister_driver(&encx24j600_spi_net_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) module_exit(encx24j600_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) MODULE_DESCRIPTION(DRV_NAME " ethernet driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) MODULE_AUTHOR("Jon Ringle <jringle@gridpoint.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) MODULE_ALIAS("spi:" DRV_NAME);