// SPDX-License-Identifier: GPL-2.0-only
/*
 * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver
 *
 * Copyright 2008 JMicron Technology Corporation
 * https://www.jmicron.com/
 * Copyright (c) 2009 - 2010 Guo-Fu Tseng <cooldavid@cooldavid.org>
 *
 * Author: Guo-Fu Tseng <cooldavid@cooldavid.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <net/ip6_checksum.h>
#include "jme.h"

static int force_pseudohp = -1;
static int no_pseudohp = -1;
static int no_extplug = -1;
module_param(force_pseudohp, int, 0);
MODULE_PARM_DESC(force_pseudohp,
	"Enable pseudo hot-plug feature manually by driver instead of BIOS.");
module_param(no_pseudohp, int, 0);
MODULE_PARM_DESC(no_pseudohp, "Disable pseudo hot-plug feature.");
module_param(no_extplug, int, 0);
MODULE_PARM_DESC(no_extplug,
	"Do not use external plug signal for pseudo hot-plug.");
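/*
 * Usage sketch (hypothetical values): these are plain module parameters,
 * so pseudo hot-plug can for instance be forced on at load time with
 *
 *	modprobe jme force_pseudohp=1 no_extplug=1
 */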

static int
jme_mdio_read(struct net_device *netdev, int phy, int reg)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int i, val, again = (reg == MII_BMSR) ? 1 : 0;

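	/*
	 * MII_BMSR is read twice ("again" above), presumably because the
	 * BMSR link-status bit is latched-low per IEEE 802.3: the first
	 * read returns the latched value, the second the current state.
	 */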
read_again:
	jwrite32(jme, JME_SMI, SMI_OP_REQ |
				smi_phy_addr(phy) |
				smi_reg_addr(reg));

	wmb();
	for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
		udelay(20);
		val = jread32(jme, JME_SMI);
		if ((val & SMI_OP_REQ) == 0)
			break;
	}

	if (i == 0) {
		pr_err("phy(%d) read timeout : %d\n", phy, reg);
		return 0;
	}

	if (again--)
		goto read_again;

	return (val & SMI_DATA_MASK) >> SMI_DATA_SHIFT;
}

static void
jme_mdio_write(struct net_device *netdev,
				int phy, int reg, int val)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int i;

	jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ |
		((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
		smi_phy_addr(phy) | smi_reg_addr(reg));

	wmb();
	for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
		udelay(20);
		if ((jread32(jme, JME_SMI) & SMI_OP_REQ) == 0)
			break;
	}

	if (i == 0)
		pr_err("phy(%d) write timeout : %d\n", phy, reg);
}

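/*
 * Restart the PHY: re-program the autonegotiation advertisement
 * (10/100 on all chips, plus 1000BASE-T on the gigabit JMC250), then
 * issue a BMCR soft reset, which is self-clearing per 802.3.
 */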
static inline void
jme_reset_phy_processor(struct jme_adapter *jme)
{
	u32 val;

	jme_mdio_write(jme->dev,
			jme->mii_if.phy_id,
			MII_ADVERTISE, ADVERTISE_ALL |
			ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

	if (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250)
		jme_mdio_write(jme->dev,
				jme->mii_if.phy_id,
				MII_CTRL1000,
				ADVERTISE_1000FULL | ADVERTISE_1000HALF);

	val = jme_mdio_read(jme->dev,
				jme->mii_if.phy_id,
				MII_BMCR);

	jme_mdio_write(jme->dev,
			jme->mii_if.phy_id,
			MII_BMCR, val | BMCR_RESET);
}

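/*
 * The wakeup-frame registers form an indirect index/data pair: JME_WFOI
 * selects which dword of frame "fnr" is addressed (the CRC word or one
 * of the mask words) and JME_WFODP carries the data.  The wmb() between
 * the two writes keeps the index/data ordering intact.
 */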
static void
jme_setup_wakeup_frame(struct jme_adapter *jme,
		       const u32 *mask, u32 crc, int fnr)
{
	int i;

	/*
	 * Setup CRC pattern
	 */
	jwrite32(jme, JME_WFOI, WFOI_CRC_SEL | (fnr & WFOI_FRAME_SEL));
	wmb();
	jwrite32(jme, JME_WFODP, crc);
	wmb();

	/*
	 * Setup Mask
	 */
	for (i = 0 ; i < WAKEUP_FRAME_MASK_DWNR ; ++i) {
		jwrite32(jme, JME_WFOI,
				((i << WFOI_MASK_SHIFT) & WFOI_MASK_SEL) |
				(fnr & WFOI_FRAME_SEL));
		wmb();
		jwrite32(jme, JME_WFODP, mask[i]);
		wmb();
	}
}

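/*
 * MAC clock gating helpers.  The TX MAC and timeout clocks are sourced
 * from the GPHY when running at gigabit speed and from the PCIe clock
 * otherwise; the RX MAC clock is simply gated via GPREG1.
 */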
static inline void
jme_mac_rxclk_off(struct jme_adapter *jme)
{
	jme->reg_gpreg1 |= GPREG1_RXCLKOFF;
	jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1);
}

static inline void
jme_mac_rxclk_on(struct jme_adapter *jme)
{
	jme->reg_gpreg1 &= ~GPREG1_RXCLKOFF;
	jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1);
}

static inline void
jme_mac_txclk_off(struct jme_adapter *jme)
{
	jme->reg_ghc &= ~(GHC_TO_CLK_SRC | GHC_TXMAC_CLK_SRC);
	jwrite32f(jme, JME_GHC, jme->reg_ghc);
}

static inline void
jme_mac_txclk_on(struct jme_adapter *jme)
{
	u32 speed = jme->reg_ghc & GHC_SPEED;

	if (speed == GHC_SPEED_1000M)
		jme->reg_ghc |= GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY;
	else
		jme->reg_ghc |= GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
	jwrite32f(jme, JME_GHC, jme->reg_ghc);
}

static inline void
jme_reset_ghc_speed(struct jme_adapter *jme)
{
	jme->reg_ghc &= ~(GHC_SPEED | GHC_DPX);
	jwrite32f(jme, JME_GHC, jme->reg_ghc);
}

static inline void
jme_reset_250A2_workaround(struct jme_adapter *jme)
{
	jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH |
			     GPREG1_RSSPATCH);
	jwrite32(jme, JME_GPREG1, jme->reg_gpreg1);
}

static inline void
jme_assert_ghc_reset(struct jme_adapter *jme)
{
	jme->reg_ghc |= GHC_SWRST;
	jwrite32f(jme, JME_GHC, jme->reg_ghc);
}

static inline void
jme_clear_ghc_reset(struct jme_adapter *jme)
{
	jme->reg_ghc &= ~GHC_SWRST;
	jwrite32f(jme, JME_GHC, jme->reg_ghc);
}

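/*
 * Full MAC reset.  The clock choreography around GHC_SWRST (clocks on,
 * assert reset, clocks off, deassert, then pulse the clocks once more)
 * follows the original driver and is presumably required by the
 * hardware to latch the reset in both clock domains.  Afterwards all
 * ring pointers, the multicast hash and the wakeup frames are cleared.
 */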
static void
jme_reset_mac_processor(struct jme_adapter *jme)
{
	static const u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0};
	u32 crc = 0xCDCDCDCD;
	u32 gpreg0;
	int i;

	jme_reset_ghc_speed(jme);
	jme_reset_250A2_workaround(jme);

	jme_mac_rxclk_on(jme);
	jme_mac_txclk_on(jme);
	udelay(1);
	jme_assert_ghc_reset(jme);
	udelay(1);
	jme_mac_rxclk_off(jme);
	jme_mac_txclk_off(jme);
	udelay(1);
	jme_clear_ghc_reset(jme);
	udelay(1);
	jme_mac_rxclk_on(jme);
	jme_mac_txclk_on(jme);
	udelay(1);
	jme_mac_rxclk_off(jme);
	jme_mac_txclk_off(jme);

	jwrite32(jme, JME_RXDBA_LO, 0x00000000);
	jwrite32(jme, JME_RXDBA_HI, 0x00000000);
	jwrite32(jme, JME_RXQDC, 0x00000000);
	jwrite32(jme, JME_RXNDA, 0x00000000);
	jwrite32(jme, JME_TXDBA_LO, 0x00000000);
	jwrite32(jme, JME_TXDBA_HI, 0x00000000);
	jwrite32(jme, JME_TXQDC, 0x00000000);
	jwrite32(jme, JME_TXNDA, 0x00000000);

	jwrite32(jme, JME_RXMCHT_LO, 0x00000000);
	jwrite32(jme, JME_RXMCHT_HI, 0x00000000);
	for (i = 0 ; i < WAKEUP_FRAME_NR ; ++i)
		jme_setup_wakeup_frame(jme, mask, crc, i);
	if (jme->fpgaver)
		gpreg0 = GPREG0_DEFAULT | GPREG0_LNKINTPOLL;
	else
		gpreg0 = GPREG0_DEFAULT;
	jwrite32(jme, JME_GPREG0, gpreg0);
}

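/*
 * Both helpers write PMCS_STMASK, which is assumed to clear any latched
 * wake-event status bits; the "enable" variant additionally ORs in the
 * wake-on-LAN enables cached in jme->reg_pmcs.
 */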
static inline void
jme_clear_pm_enable_wol(struct jme_adapter *jme)
{
	jwrite32(jme, JME_PMCS, PMCS_STMASK | jme->reg_pmcs);
}

static inline void
jme_clear_pm_disable_wol(struct jme_adapter *jme)
{
	jwrite32(jme, JME_PMCS, PMCS_STMASK);
}

static int
jme_reload_eeprom(struct jme_adapter *jme)
{
	u32 val;
	int i;

	val = jread32(jme, JME_SMBCSR);

	if (val & SMBCSR_EEPROMD) {
		val |= SMBCSR_CNACK;
		jwrite32(jme, JME_SMBCSR, val);
		val |= SMBCSR_RELOAD;
		jwrite32(jme, JME_SMBCSR, val);
		mdelay(12);

		for (i = JME_EEPROM_RELOAD_TIMEOUT; i > 0; --i) {
			mdelay(1);
			if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0)
				break;
		}

		if (i == 0) {
			pr_err("eeprom reload timeout\n");
			return -EIO;
		}
	}

	return 0;
}

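/*
 * Read the unicast MAC address back from the RXUMA filter registers.
 * The six bytes are stored little-endian across RXUMA_LO (bytes 0-3)
 * and RXUMA_HI (bytes 4-5).
 */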
static void
jme_load_macaddr(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	unsigned char macaddr[ETH_ALEN];
	u32 val;

	spin_lock_bh(&jme->macaddr_lock);
	val = jread32(jme, JME_RXUMA_LO);
	macaddr[0] = (val >> 0) & 0xFF;
	macaddr[1] = (val >> 8) & 0xFF;
	macaddr[2] = (val >> 16) & 0xFF;
	macaddr[3] = (val >> 24) & 0xFF;
	val = jread32(jme, JME_RXUMA_HI);
	macaddr[4] = (val >> 0) & 0xFF;
	macaddr[5] = (val >> 8) & 0xFF;
	memcpy(netdev->dev_addr, macaddr, ETH_ALEN);
	spin_unlock_bh(&jme->macaddr_lock);
}

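/*
 * Program one of the RX interrupt-coalescing presets: each PCC_Px pairs
 * a timeout (PCC_Px_TO) with a packet count (PCC_Px_CNT).  The usual
 * coalescing semantic, assumed here, is that the RX completion
 * interrupt fires when either threshold is reached first.
 */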
static inline void
jme_set_rx_pcc(struct jme_adapter *jme, int p)
{
	switch (p) {
	case PCC_OFF:
		jwrite32(jme, JME_PCCRX0,
			((PCC_OFF_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
			((PCC_OFF_CNT << PCCRX_SHIFT) & PCCRX_MASK));
		break;
	case PCC_P1:
		jwrite32(jme, JME_PCCRX0,
			((PCC_P1_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
			((PCC_P1_CNT << PCCRX_SHIFT) & PCCRX_MASK));
		break;
	case PCC_P2:
		jwrite32(jme, JME_PCCRX0,
			((PCC_P2_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
			((PCC_P2_CNT << PCCRX_SHIFT) & PCCRX_MASK));
		break;
	case PCC_P3:
		jwrite32(jme, JME_PCCRX0,
			((PCC_P3_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
			((PCC_P3_CNT << PCCRX_SHIFT) & PCCRX_MASK));
		break;
	default:
		break;
	}
	wmb();

	if (!test_bit(JME_FLAG_POLL, &jme->flags))
		netif_info(jme, rx_status, jme->dev, "Switched to PCC_P%d\n", p);
}

static void
jme_start_irq(struct jme_adapter *jme)
{
	register struct dynpcc_info *dpi = &(jme->dpi);

	jme_set_rx_pcc(jme, PCC_P1);
	dpi->cur = PCC_P1;
	dpi->attempt = PCC_P1;
	dpi->cnt = 0;

	jwrite32(jme, JME_PCCTX,
			((PCC_TX_TO << PCCTXTO_SHIFT) & PCCTXTO_MASK) |
			((PCC_TX_CNT << PCCTX_SHIFT) & PCCTX_MASK) |
			PCCTXQ0_EN);

	/*
	 * Enable Interrupts
	 */
	jwrite32(jme, JME_IENS, INTR_ENABLE);
}

static inline void
jme_stop_irq(struct jme_adapter *jme)
{
	/*
	 * Disable Interrupts
	 */
	jwrite32f(jme, JME_IENC, INTR_ENABLE);
}

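/*
 * On FPGA prototype boards (jme->fpgaver != 0) the JME_PHY_LINK register
 * is not usable, so link state is assembled from the PHY instead:
 * register 17 is in the vendor-specific MII range, and the
 * autoneg-complete bit is folded in from BMSR.
 */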
static u32
jme_linkstat_from_phy(struct jme_adapter *jme)
{
	u32 phylink, bmsr;

	phylink = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 17);
	bmsr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMSR);
	if (bmsr & BMSR_ANCOMP)
		phylink |= PHY_LINK_AUTONEG_COMPLETE;

	return phylink;
}

static inline void
jme_set_phyfifo_5level(struct jme_adapter *jme)
{
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004);
}

static inline void
jme_set_phyfifo_8level(struct jme_adapter *jme)
{
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0000);
}

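/*
 * Re-evaluate the PHY link state and, unless @testonly is set, program
 * the MAC (speed, duplex, FIFO workarounds for buggy JMC250 revisions)
 * to match.  Returns 1 when the link is up and its state is unchanged,
 * 0 otherwise.
 */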
static int
jme_check_link(struct net_device *netdev, int testonly)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 phylink, cnt = JME_SPDRSV_TIMEOUT, bmcr;
	char linkmsg[64];
	int rc = 0;

	linkmsg[0] = '\0';

	if (jme->fpgaver)
		phylink = jme_linkstat_from_phy(jme);
	else
		phylink = jread32(jme, JME_PHY_LINK);

	if (phylink & PHY_LINK_UP) {
		if (!(phylink & PHY_LINK_AUTONEG_COMPLETE)) {
			/*
			 * If autonegotiation was not enabled, the
			 * speed/duplex info has to be obtained from
			 * the PHY through SMI.
			 */
			phylink = PHY_LINK_UP;

			bmcr = jme_mdio_read(jme->dev,
						jme->mii_if.phy_id,
						MII_BMCR);

			phylink |= ((bmcr & BMCR_SPEED1000) &&
					(bmcr & BMCR_SPEED100) == 0) ?
					PHY_LINK_SPEED_1000M :
					(bmcr & BMCR_SPEED100) ?
					PHY_LINK_SPEED_100M :
					PHY_LINK_SPEED_10M;

			phylink |= (bmcr & BMCR_FULLDPLX) ?
					 PHY_LINK_DUPLEX : 0;

			strcat(linkmsg, "Forced: ");
		} else {
			/*
			 * Keep polling until speed/duplex resolution
			 * completes or we time out
			 */
			while (!(phylink & PHY_LINK_SPEEDDPU_RESOLVED) &&
					--cnt) {

				udelay(1);

				if (jme->fpgaver)
					phylink = jme_linkstat_from_phy(jme);
				else
					phylink = jread32(jme, JME_PHY_LINK);
			}
			if (!cnt)
				pr_err("Timeout waiting for speed/duplex resolve\n");

			strcat(linkmsg, "ANed: ");
		}

		if (jme->phylink == phylink) {
			rc = 1;
			goto out;
		}
		if (testonly)
			goto out;

		jme->phylink = phylink;

		/*
		 * The speed/duplex bits of jme->reg_ghc were already
		 * cleared by jme_reset_mac_processor()
		 */
		switch (phylink & PHY_LINK_SPEED_MASK) {
		case PHY_LINK_SPEED_10M:
			jme->reg_ghc |= GHC_SPEED_10M;
			strcat(linkmsg, "10 Mbps, ");
			break;
		case PHY_LINK_SPEED_100M:
			jme->reg_ghc |= GHC_SPEED_100M;
			strcat(linkmsg, "100 Mbps, ");
			break;
		case PHY_LINK_SPEED_1000M:
			jme->reg_ghc |= GHC_SPEED_1000M;
			strcat(linkmsg, "1000 Mbps, ");
			break;
		default:
			break;
		}

		if (phylink & PHY_LINK_DUPLEX) {
			jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
			jwrite32(jme, JME_TXTRHD, TXTRHD_FULLDUPLEX);
			jme->reg_ghc |= GHC_DPX;
		} else {
			jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
						TXMCS_BACKOFF |
						TXMCS_CARRIERSENSE |
						TXMCS_COLLISION);
			jwrite32(jme, JME_TXTRHD, TXTRHD_HALFDUPLEX);
		}

		jwrite32(jme, JME_GHC, jme->reg_ghc);

		if (is_buggy250(jme->pdev->device, jme->chiprev)) {
			jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH |
					     GPREG1_RSSPATCH);
			if (!(phylink & PHY_LINK_DUPLEX))
				jme->reg_gpreg1 |= GPREG1_HALFMODEPATCH;
			switch (phylink & PHY_LINK_SPEED_MASK) {
			case PHY_LINK_SPEED_10M:
				jme_set_phyfifo_8level(jme);
				jme->reg_gpreg1 |= GPREG1_RSSPATCH;
				break;
			case PHY_LINK_SPEED_100M:
				jme_set_phyfifo_5level(jme);
				jme->reg_gpreg1 |= GPREG1_RSSPATCH;
				break;
			case PHY_LINK_SPEED_1000M:
				jme_set_phyfifo_8level(jme);
				break;
			default:
				break;
			}
		}
		jwrite32(jme, JME_GPREG1, jme->reg_gpreg1);

		strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ?
					"Full-Duplex, " :
					"Half-Duplex, ");
		strcat(linkmsg, (phylink & PHY_LINK_MDI_STAT) ?
					"MDI-X" :
					"MDI");
		netif_info(jme, link, jme->dev, "Link is up at %s\n", linkmsg);
		netif_carrier_on(netdev);
	} else {
		if (testonly)
			goto out;

		netif_info(jme, link, jme->dev, "Link is down\n");
		jme->phylink = 0;
		netif_carrier_off(netdev);
	}

out:
	return rc;
}

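/*
 * Allocate the TX descriptor ring and its per-slot bookkeeping array.
 * TX_RING_ALLOC_SIZE over-allocates so the descriptor base can be
 * rounded up to RING_DESC_ALIGN (16 bytes) below; GFP_ATOMIC is used
 * presumably because this can be called from a non-sleeping
 * (link-change) context.
 */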
static int
jme_setup_tx_resources(struct jme_adapter *jme)
{
	struct jme_ring *txring = &(jme->txring[0]);

	txring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
				   TX_RING_ALLOC_SIZE(jme->tx_ring_size),
				   &(txring->dmaalloc),
				   GFP_ATOMIC);

	if (!txring->alloc)
		goto err_set_null;

	/*
	 * 16-byte alignment
	 */
	txring->desc = (void *)ALIGN((unsigned long)(txring->alloc),
						     RING_DESC_ALIGN);
	txring->dma = ALIGN(txring->dmaalloc, RING_DESC_ALIGN);
	txring->next_to_use = 0;
	atomic_set(&txring->next_to_clean, 0);
	atomic_set(&txring->nr_free, jme->tx_ring_size);

	txring->bufinf = kcalloc(jme->tx_ring_size,
				 sizeof(struct jme_buffer_info),
				 GFP_ATOMIC);
	if (unlikely(!(txring->bufinf)))
		goto err_free_txring;

	return 0;

err_free_txring:
	dma_free_coherent(&(jme->pdev->dev),
			  TX_RING_ALLOC_SIZE(jme->tx_ring_size),
			  txring->alloc,
			  txring->dmaalloc);

err_set_null:
	txring->desc = NULL;
	txring->dmaalloc = 0;
	txring->dma = 0;
	txring->bufinf = NULL;

	return -ENOMEM;
}

static void
jme_free_tx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *txring = &(jme->txring[0]);
	struct jme_buffer_info *txbi;

	if (txring->alloc) {
		if (txring->bufinf) {
			for (i = 0 ; i < jme->tx_ring_size ; ++i) {
				txbi = txring->bufinf + i;
				if (txbi->skb) {
					dev_kfree_skb(txbi->skb);
					txbi->skb = NULL;
				}
				txbi->mapping = 0;
				txbi->len = 0;
				txbi->nr_desc = 0;
				txbi->start_xmit = 0;
			}
			kfree(txring->bufinf);
		}

		dma_free_coherent(&(jme->pdev->dev),
				  TX_RING_ALLOC_SIZE(jme->tx_ring_size),
				  txring->alloc,
				  txring->dmaalloc);

		txring->alloc = NULL;
		txring->desc = NULL;
		txring->dmaalloc = 0;
		txring->dma = 0;
		txring->bufinf = NULL;
	}
	txring->next_to_use = 0;
	atomic_set(&txring->next_to_clean, 0);
	atomic_set(&txring->nr_free, 0);
}

static inline void
jme_enable_tx_engine(struct jme_adapter *jme)
{
	/*
	 * Select Queue 0
	 */
	jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0);
	wmb();

	/*
	 * Setup TX Queue 0 DMA Base Address
	 */
	jwrite32(jme, JME_TXDBA_LO, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);
	jwrite32(jme, JME_TXDBA_HI, (__u64)(jme->txring[0].dma) >> 32);
	jwrite32(jme, JME_TXNDA, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);

	/*
	 * Setup TX Descriptor Count
	 */
	jwrite32(jme, JME_TXQDC, jme->tx_ring_size);

	/*
	 * Enable TX Engine
	 */
	wmb();
	jwrite32f(jme, JME_TXCS, jme->reg_txcs |
				TXCS_SELECT_QUEUE0 |
				TXCS_ENABLE);

	/*
	 * Start clock for TX MAC Processor
	 */
	jme_mac_txclk_on(jme);
}

static inline void
jme_disable_tx_engine(struct jme_adapter *jme)
{
	int i;
	u32 val;

	/*
	 * Disable TX Engine
	 */
	jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0);
	wmb();

	val = jread32(jme, JME_TXCS);
	for (i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i) {
		mdelay(1);
		val = jread32(jme, JME_TXCS);
		rmb();
	}

	if (!i)
		pr_err("Disable TX engine timeout\n");

	/*
	 * Stop clock for TX MAC Processor
	 */
	jme_mac_txclk_off(jme);
}

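/*
 * Re-arm RX descriptor @i for the hardware.  All fields are written
 * first and the wmb() makes them globally visible before RXFLAG_OWN is
 * set, so the NIC can never pick up a half-initialized descriptor.
 */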
static void
jme_set_clean_rxdesc(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	register struct rxdesc *rxdesc = rxring->desc;
	struct jme_buffer_info *rxbi = rxring->bufinf;

	rxdesc += i;
	rxbi += i;

	rxdesc->dw[0] = 0;
	rxdesc->dw[1] = 0;
	rxdesc->desc1.bufaddrh = cpu_to_le32((__u64)rxbi->mapping >> 32);
	rxdesc->desc1.bufaddrl = cpu_to_le32(
					(__u64)rxbi->mapping & 0xFFFFFFFFUL);
	rxdesc->desc1.datalen = cpu_to_le16(rxbi->len);
	if (jme->dev->features & NETIF_F_HIGHDMA)
		rxdesc->desc1.flags = RXFLAG_64BIT;
	wmb();
	rxdesc->desc1.flags |= RXFLAG_OWN | RXFLAG_INT;
}

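/*
 * Allocate and DMA-map a fresh receive skb for slot @i.  The old
 * mapping, if any, is released only after the new buffer is known to
 * be good, so the slot is never left without a valid buffer on error.
 */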
static int
jme_make_new_rx_buf(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	struct jme_buffer_info *rxbi = rxring->bufinf + i;
	struct sk_buff *skb;
	dma_addr_t mapping;

	skb = netdev_alloc_skb(jme->dev,
		jme->dev->mtu + RX_EXTRA_LEN);
	if (unlikely(!skb))
		return -ENOMEM;

	mapping = pci_map_page(jme->pdev, virt_to_page(skb->data),
			       offset_in_page(skb->data), skb_tailroom(skb),
			       PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(jme->pdev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	if (likely(rxbi->mapping))
		pci_unmap_page(jme->pdev, rxbi->mapping,
			       rxbi->len, PCI_DMA_FROMDEVICE);

	rxbi->skb = skb;
	rxbi->len = skb_tailroom(skb);
	rxbi->mapping = mapping;
	return 0;
}

static void
jme_free_rx_buf(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	struct jme_buffer_info *rxbi = rxring->bufinf;

	rxbi += i;

	if (rxbi->skb) {
		pci_unmap_page(jme->pdev,
				 rxbi->mapping,
				 rxbi->len,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb(rxbi->skb);
		rxbi->skb = NULL;
		rxbi->mapping = 0;
		rxbi->len = 0;
	}
}

static void
jme_free_rx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *rxring = &(jme->rxring[0]);

	if (rxring->alloc) {
		if (rxring->bufinf) {
			for (i = 0 ; i < jme->rx_ring_size ; ++i)
				jme_free_rx_buf(jme, i);
			kfree(rxring->bufinf);
		}

		dma_free_coherent(&(jme->pdev->dev),
				  RX_RING_ALLOC_SIZE(jme->rx_ring_size),
				  rxring->alloc,
				  rxring->dmaalloc);
		rxring->alloc = NULL;
		rxring->desc = NULL;
		rxring->dmaalloc = 0;
		rxring->dma = 0;
		rxring->bufinf = NULL;
	}
	rxring->next_to_use = 0;
	atomic_set(&rxring->next_to_clean, 0);
}

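/*
 * Mirror of jme_setup_tx_resources() for the RX side, with one extra
 * step: every descriptor is populated with a freshly mapped skb and
 * handed to the hardware up front.
 */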
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) jme_setup_rx_resources(struct jme_adapter *jme)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) struct jme_ring *rxring = &(jme->rxring[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) RX_RING_ALLOC_SIZE(jme->rx_ring_size),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) &(rxring->dmaalloc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) if (!rxring->alloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) goto err_set_null;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) * 16 Bytes align
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) rxring->desc = (void *)ALIGN((unsigned long)(rxring->alloc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) RING_DESC_ALIGN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) rxring->dma = ALIGN(rxring->dmaalloc, RING_DESC_ALIGN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) rxring->next_to_use = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) atomic_set(&rxring->next_to_clean, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) rxring->bufinf = kcalloc(jme->rx_ring_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) sizeof(struct jme_buffer_info),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) if (unlikely(!(rxring->bufinf)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) goto err_free_rxring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) * Initiallize Receive Descriptors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) for (i = 0 ; i < jme->rx_ring_size ; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) if (unlikely(jme_make_new_rx_buf(jme, i))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) jme_free_rx_resources(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) jme_set_clean_rxdesc(jme, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) err_free_rxring:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) dma_free_coherent(&(jme->pdev->dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) RX_RING_ALLOC_SIZE(jme->rx_ring_size),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) rxring->alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) rxring->dmaalloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) err_set_null:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) rxring->desc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) rxring->dmaalloc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) rxring->dma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) rxring->bufinf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
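/*
 * Bring the RX engine up in order: select queue 0, program the ring
 * base address and descriptor count, install the unicast/multicast
 * filters, and only then set the enable bit.  The wmb() calls keep
 * the configuration writes ordered ahead of the enable.
 */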
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) jme_enable_rx_engine(struct jme_adapter *jme)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) * Select Queue 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) jwrite32(jme, JME_RXCS, jme->reg_rxcs |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) RXCS_QUEUESEL_Q0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) * Set up RX DMA Base Address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) jwrite32(jme, JME_RXDBA_LO, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) jwrite32(jme, JME_RXDBA_HI, (__u64)(jme->rxring[0].dma) >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) jwrite32(jme, JME_RXNDA, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) * Set up RX Descriptor Count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) jwrite32(jme, JME_RXQDC, jme->rx_ring_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) * Set up unicast address and multicast filter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) jme_set_unicastaddr(jme->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) jme_set_multi(jme->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) * Enable RX Engine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) jwrite32f(jme, JME_RXCS, jme->reg_rxcs |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) RXCS_QUEUESEL_Q0 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) RXCS_ENABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) RXCS_QST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) * Start clock for RX MAC Processor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) jme_mac_rxclk_on(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) jme_restart_rx_engine(struct jme_adapter *jme)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) * Start RX Engine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) jwrite32(jme, JME_RXCS, jme->reg_rxcs |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) RXCS_QUEUESEL_Q0 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) RXCS_ENABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) RXCS_QST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
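/*
 * Disabling the RX engine is asynchronous: clear the enable bit and
 * then poll RXCS until the hardware drops RXCS_ENABLE, waiting up to
 * JME_RX_DISABLE_TIMEOUT iterations of 1 ms each.
 */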
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) jme_disable_rx_engine(struct jme_adapter *jme)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) * Disable RX Engine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) jwrite32(jme, JME_RXCS, jme->reg_rxcs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) val = jread32(jme, JME_RXCS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) for (i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) mdelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) val = jread32(jme, JME_RXCS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) if (!i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) pr_err("Disable RX engine timeout\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) * Stop clock for RX MAC Processor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) jme_mac_rxclk_off(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
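/*
 * Fetch the UDP checksum field of an IPv4/UDP frame, or a non-zero
 * dummy (0xFFFF) for anything else.  A return of zero means the
 * sender did not checksum the datagram at all, letting the caller
 * ignore a hardware-flagged UDP checksum failure for such frames.
 */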
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) static u16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) jme_udpsum(struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) u16 csum = 0xFFFFu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) if (skb->len < (ETH_HLEN + sizeof(struct iphdr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) return csum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) if (skb->protocol != htons(ETH_P_IP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) return csum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) skb_set_network_header(skb, ETH_HLEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) if ((ip_hdr(skb)->protocol != IPPROTO_UDP) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) (skb->len < (ETH_HLEN +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) (ip_hdr(skb)->ihl << 2) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) sizeof(struct udphdr)))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) skb_reset_network_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) return csum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) skb_set_transport_header(skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) ETH_HLEN + (ip_hdr(skb)->ihl << 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) csum = udp_hdr(skb)->check;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) skb_reset_transport_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) skb_reset_network_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) return csum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
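/*
 * Decide whether the hardware checksum verdict can be trusted: each
 * protocol the hardware recognized (*ON flag) must also carry its
 * checksum-good flag (*CS).  Fragmented frames (RXWBFLAG_MF) skip the
 * TCP/UDP checks, since only the first fragment holds the transport
 * header.
 */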
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) jme_rxsum_ok(struct jme_adapter *jme, u16 flags, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_TCPON | RXWBFLAG_TCPCS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) == RXWBFLAG_TCPON)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) if (flags & RXWBFLAG_IPV4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) netif_err(jme, rx_err, jme->dev, "TCP Checksum error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) == RXWBFLAG_UDPON) && jme_udpsum(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) if (flags & RXWBFLAG_IPV4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) netif_err(jme, rx_err, jme->dev, "UDP Checksum error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) if (unlikely((flags & (RXWBFLAG_IPV4 | RXWBFLAG_IPCS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) == RXWBFLAG_IPV4)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) netif_err(jme, rx_err, jme->dev, "IPv4 Checksum error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
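/*
 * Hand one completed RX descriptor to the stack.  A replacement
 * buffer is allocated before the skb is consumed; if that fails, the
 * frame is dropped and the old buffer is recycled back to the ring,
 * so no slot is ever lost under memory pressure.
 */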
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) struct jme_ring *rxring = &(jme->rxring[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) struct rxdesc *rxdesc = rxring->desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) struct jme_buffer_info *rxbi = rxring->bufinf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) int framesize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) rxdesc += idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) rxbi += idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) skb = rxbi->skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) dma_sync_single_for_cpu(&jme->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) rxbi->mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) rxbi->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) if (unlikely(jme_make_new_rx_buf(jme, idx))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) dma_sync_single_for_device(&jme->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) rxbi->mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) rxbi->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) ++(NET_STAT(jme).rx_dropped);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) framesize = le16_to_cpu(rxdesc->descwb.framesize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) - RX_PREPAD_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) skb_reserve(skb, RX_PREPAD_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) skb_put(skb, framesize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) skb->protocol = eth_type_trans(skb, jme->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags), skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) skb->ip_summed = CHECKSUM_UNNECESSARY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) skb_checksum_none_assert(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) u16 vid = le16_to_cpu(rxdesc->descwb.vlan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) NET_STAT(jme).rx_bytes += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) jme->jme_rx(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_DEST)) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) cpu_to_le16(RXWBFLAG_DEST_MUL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) ++(NET_STAT(jme).multicast);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) NET_STAT(jme).rx_bytes += framesize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) ++(NET_STAT(jme).rx_packets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) jme_set_clean_rxdesc(jme, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
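/*
 * Main RX completion loop.  Stops at the first descriptor still owned
 * by the hardware (or not yet written back), handles at most @limit
 * frames, and returns the unused budget.  The rx_cleaning counter
 * acts as a try-lock so the tasklet and NAPI paths never run this
 * concurrently.
 */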
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) jme_process_receive(struct jme_adapter *jme, int limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) struct jme_ring *rxring = &(jme->rxring[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) struct rxdesc *rxdesc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) int i, j, ccnt, desccnt, mask = jme->rx_ring_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) if (unlikely(!atomic_dec_and_test(&jme->rx_cleaning)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) goto out_inc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) if (unlikely(atomic_read(&jme->link_changing) != 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) goto out_inc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) if (unlikely(!netif_carrier_ok(jme->dev)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) goto out_inc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) i = atomic_read(&rxring->next_to_clean);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) while (limit > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) rxdesc = rxring->desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) rxdesc += i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_OWN)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) !(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) --limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) if (unlikely(desccnt > 1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) rxdesc->descwb.errstat & RXWBERR_ALLERR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) if (rxdesc->descwb.errstat & RXWBERR_CRCERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) ++(NET_STAT(jme).rx_crc_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) else if (rxdesc->descwb.errstat & RXWBERR_OVERUN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) ++(NET_STAT(jme).rx_fifo_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) ++(NET_STAT(jme).rx_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) if (desccnt > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) limit -= desccnt - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) for (j = i, ccnt = desccnt ; ccnt-- ; ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) jme_set_clean_rxdesc(jme, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) j = (j + 1) & (mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) jme_alloc_and_feed_skb(jme, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) i = (i + desccnt) & (mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) atomic_set(&rxring->next_to_clean, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) out_inc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) atomic_inc(&jme->rx_cleaning);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) return limit > 0 ? limit : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
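/*
 * Dynamic PCC: jme_dynamic_pcc() votes for an RX interrupt-coalescing
 * level based on the byte/packet/interrupt rates observed during the
 * last interval, and jme_attempt_pcc() adds hysteresis by requiring
 * several consecutive identical votes before the level is switched.
 */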
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) jme_attempt_pcc(struct dynpcc_info *dpi, int atmp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) if (likely(atmp == dpi->cur)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) dpi->cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) if (dpi->attempt == atmp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) ++(dpi->cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) dpi->attempt = atmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) dpi->cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) jme_dynamic_pcc(struct jme_adapter *jme)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) struct dynpcc_info *dpi = &(jme->dpi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) if ((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P3_THRESHOLD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) jme_attempt_pcc(dpi, PCC_P3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) else if ((NET_STAT(jme).rx_packets - dpi->last_pkts) > PCC_P2_THRESHOLD ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) dpi->intr_cnt > PCC_INTR_THRESHOLD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) jme_attempt_pcc(dpi, PCC_P2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) jme_attempt_pcc(dpi, PCC_P1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) if (dpi->attempt < dpi->cur)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) tasklet_schedule(&jme->rxclean_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) jme_set_rx_pcc(jme, dpi->attempt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) dpi->cur = dpi->attempt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) dpi->cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
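/*
 * Arm the chip timer to fire after PCC_INTERVAL_US.  The counter
 * presumably counts up and wraps at 0xFFFFFF, hence the start value
 * of 0xFFFFFF minus the interval.
 */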
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) jme_start_pcc_timer(struct jme_adapter *jme)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) struct dynpcc_info *dpi = &(jme->dpi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) dpi->last_bytes = NET_STAT(jme).rx_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) dpi->last_pkts = NET_STAT(jme).rx_packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) dpi->intr_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) jwrite32(jme, JME_TMCSR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) TMCSR_EN | ((0xFFFFFF - PCC_INTERVAL_US) & TMCSR_CNT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) jme_stop_pcc_timer(struct jme_adapter *jme)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) jwrite32(jme, JME_TMCSR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) jme_shutdown_nic(struct jme_adapter *jme)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) u32 phylink;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) phylink = jme_linkstat_from_phy(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) if (!(phylink & PHY_LINK_UP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) * Disable all interrupts before issuing the timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) jme_stop_irq(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) jwrite32(jme, JME_TIMER2, TMCSR_EN | 0xFFFFFE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
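/*
 * Periodic timer tasklet: while the link is up it re-evaluates the
 * coalescing level once per PCC interval; when the shutdown flag is
 * set it instead performs the deferred pseudo hot-plug shutdown.
 */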
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) jme_pcc_tasklet(struct tasklet_struct *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) struct jme_adapter *jme = from_tasklet(jme, t, pcc_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) struct net_device *netdev = jme->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) if (unlikely(test_bit(JME_FLAG_SHUTDOWN, &jme->flags))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) jme_shutdown_nic(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) if (unlikely(!netif_carrier_ok(netdev) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) (atomic_read(&jme->link_changing) != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) )) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) jme_stop_pcc_timer(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) if (!(test_bit(JME_FLAG_POLL, &jme->flags)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) jme_dynamic_pcc(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) jme_start_pcc_timer(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) jme_polling_mode(struct jme_adapter *jme)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) jme_set_rx_pcc(jme, PCC_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) jme_interrupt_mode(struct jme_adapter *jme)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) jme_set_rx_pcc(jme, PCC_P1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) static inline int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) jme_pseudo_hotplug_enabled(struct jme_adapter *jme)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) u32 apmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) apmc = jread32(jme, JME_APMC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) return apmc & JME_APMC_PSEUDO_HP_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
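/*
 * Pseudo hot-plug: when the link goes down, enable the PCIe shutdown
 * logic in APMC and arm the timer for APMC_PHP_SHUTDOWN_DELAY.  When
 * it fires, jme_shutdown_nic() re-checks the link and lets TIMER2
 * trigger the actual power-down (hardware behaviour inferred from the
 * register names).
 */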
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) jme_start_shutdown_timer(struct jme_adapter *jme)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) u32 apmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) apmc = jread32(jme, JME_APMC) | JME_APMC_PCIE_SD_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) apmc &= ~JME_APMC_EPIEN_CTRL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) if (!no_extplug) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) jwrite32f(jme, JME_APMC, apmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) jwrite32f(jme, JME_TIMER2, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) set_bit(JME_FLAG_SHUTDOWN, &jme->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) jwrite32(jme, JME_TMCSR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) TMCSR_EN | ((0xFFFFFF - APMC_PHP_SHUTDOWN_DELAY) & TMCSR_CNT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) jme_stop_shutdown_timer(struct jme_adapter *jme)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) u32 apmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) jwrite32f(jme, JME_TMCSR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) jwrite32f(jme, JME_TIMER2, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) clear_bit(JME_FLAG_SHUTDOWN, &jme->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) apmc = jread32(jme, JME_APMC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) apmc &= ~(JME_APMC_PCIE_SD_EN | JME_APMC_EPIEN_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_DIS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) jwrite32f(jme, JME_APMC, apmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
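/*
 * Link change handling: take the link_changing "lock", quiesce the
 * clean tasklets, tear the engines and rings down if the carrier was
 * up, then rebuild everything for the newly negotiated link, or arm
 * the shutdown timer if the link is gone and pseudo hot-plug is on.
 */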
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) static void jme_link_change_tasklet(struct tasklet_struct *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) struct jme_adapter *jme = from_tasklet(jme, t, linkch_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) struct net_device *netdev = jme->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) while (!atomic_dec_and_test(&jme->link_changing)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) atomic_inc(&jme->link_changing);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) netif_info(jme, intr, jme->dev, "Failed to get link change lock\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) while (atomic_read(&jme->link_changing) != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) netif_info(jme, intr, jme->dev, "Waiting for link change lock\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) if (jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) jme->old_mtu = netdev->mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) netif_stop_queue(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) if (jme_pseudo_hotplug_enabled(jme))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) jme_stop_shutdown_timer(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) jme_stop_pcc_timer(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) tasklet_disable(&jme->txclean_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) tasklet_disable(&jme->rxclean_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) tasklet_disable(&jme->rxempty_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) if (netif_carrier_ok(netdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) jme_disable_rx_engine(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) jme_disable_tx_engine(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) jme_reset_mac_processor(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) jme_free_rx_resources(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) jme_free_tx_resources(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) if (test_bit(JME_FLAG_POLL, &jme->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) jme_polling_mode(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) netif_carrier_off(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) jme_check_link(netdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) if (netif_carrier_ok(netdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) rc = jme_setup_rx_resources(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) pr_err("Failed to allocate RX resources, device STOPPED!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) goto out_enable_tasklet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) rc = jme_setup_tx_resources(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) pr_err("Failed to allocate TX resources, device STOPPED!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) goto err_out_free_rx_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) jme_enable_rx_engine(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) jme_enable_tx_engine(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) netif_start_queue(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) if (test_bit(JME_FLAG_POLL, &jme->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) jme_interrupt_mode(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) jme_start_pcc_timer(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) } else if (jme_pseudo_hotplug_enabled(jme)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) jme_start_shutdown_timer(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) goto out_enable_tasklet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) err_out_free_rx_resources:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) jme_free_rx_resources(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) out_enable_tasklet:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) tasklet_enable(&jme->txclean_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) tasklet_enable(&jme->rxclean_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) tasklet_enable(&jme->rxempty_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) atomic_inc(&jme->link_changing);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) jme_rx_clean_tasklet(struct tasklet_struct *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) struct jme_adapter *jme = from_tasklet(jme, t, rxclean_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) struct dynpcc_info *dpi = &(jme->dpi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) jme_process_receive(jme, jme->rx_ring_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) ++(dpi->intr_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) jme_poll(JME_NAPI_HOLDER(holder), JME_NAPI_WEIGHT(budget))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) struct jme_adapter *jme = jme_napi_priv(holder);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) int rest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) rest = jme_process_receive(jme, JME_NAPI_WEIGHT_VAL(budget));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) while (atomic_read(&jme->rx_empty) > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) atomic_dec(&jme->rx_empty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) ++(NET_STAT(jme).rx_dropped);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) jme_restart_rx_engine(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) atomic_inc(&jme->rx_empty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) if (rest) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) JME_RX_COMPLETE(netdev, holder);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) jme_interrupt_mode(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) JME_NAPI_WEIGHT_SET(budget, rest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) return JME_NAPI_WEIGHT_VAL(budget) - rest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) jme_rx_empty_tasklet(struct tasklet_struct *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) struct jme_adapter *jme = from_tasklet(jme, t, rxempty_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) if (unlikely(atomic_read(&jme->link_changing) != 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) if (unlikely(!netif_carrier_ok(jme->dev)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) netif_info(jme, rx_status, jme->dev, "RX Queue Full!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) jme_rx_clean_tasklet(&jme->rxclean_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) while (atomic_read(&jme->rx_empty) > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) atomic_dec(&jme->rx_empty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) ++(NET_STAT(jme).rx_dropped);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) jme_restart_rx_engine(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) atomic_inc(&jme->rx_empty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
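/*
 * Restart the TX queue once enough descriptors were reclaimed.  The
 * smp_wmb() makes the updated nr_free visible before the queue state
 * is tested; it pairs with the stop logic in the xmit path.
 */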
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) jme_wake_queue_if_stopped(struct jme_adapter *jme)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) struct jme_ring *txring = &(jme->txring[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) if (unlikely(netif_queue_stopped(jme->dev) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) netif_info(jme, tx_done, jme->dev, "TX Queue Woken\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) netif_wake_queue(jme->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
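/*
 * TX completion: walk the ring from next_to_clean, advancing by each
 * packet's nr_desc.  Completed packets have their fragment pages
 * unmapped and skb freed, and the slots are credited back to nr_free.
 * The walk stops at the first packet still owned by the hardware.
 */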
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) static void jme_tx_clean_tasklet(struct tasklet_struct *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) struct jme_adapter *jme = from_tasklet(jme, t, txclean_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) struct jme_ring *txring = &(jme->txring[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) struct txdesc *txdesc = txring->desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) int i, j, cnt = 0, max, err, mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) tx_dbg(jme, "Into txclean\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) if (unlikely(!atomic_dec_and_test(&jme->tx_cleaning)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) if (unlikely(atomic_read(&jme->link_changing) != 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) if (unlikely(!netif_carrier_ok(jme->dev)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) max = jme->tx_ring_size - atomic_read(&txring->nr_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) mask = jme->tx_ring_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) for (i = atomic_read(&txring->next_to_clean) ; cnt < max ; ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) ctxbi = txbi + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) if (likely(ctxbi->skb &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) !(txdesc[i].descwb.flags & TXWBFLAG_OWN))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) tx_dbg(jme, "txclean: %d+%d@%lu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) i, ctxbi->nr_desc, jiffies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) for (j = 1 ; j < ctxbi->nr_desc ; ++j) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) ttxbi = txbi + ((i + j) & (mask));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) txdesc[(i + j) & (mask)].dw[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) dma_unmap_page(&jme->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) ttxbi->mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) ttxbi->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) ttxbi->mapping = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) ttxbi->len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) dev_kfree_skb(ctxbi->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) cnt += ctxbi->nr_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) ++(NET_STAT(jme).tx_carrier_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) ++(NET_STAT(jme).tx_packets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) NET_STAT(jme).tx_bytes += ctxbi->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) ctxbi->skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) ctxbi->len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) ctxbi->start_xmit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) i = (i + ctxbi->nr_desc) & mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) ctxbi->nr_desc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) tx_dbg(jme, "txclean: done %d@%lu\n", i, jiffies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) atomic_set(&txring->next_to_clean, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) atomic_add(cnt, &txring->nr_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) jme_wake_queue_if_stopped(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) atomic_inc(&jme->tx_cleaning);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)
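/*
 * Core interrupt logic shared by the INTx and MSI handlers: mask all
 * interrupts, acknowledge each pending event class and hand it to the
 * matching tasklet (or the NAPI poller), then unmask again.
 */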
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) jme_intr_msi(struct jme_adapter *jme, u32 intrstat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) * Disable interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) jwrite32f(jme, JME_IENC, INTR_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) if (intrstat & (INTR_LINKCH | INTR_SWINTR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) * Link change event is critical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) * all other events are ignored
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) jwrite32(jme, JME_IEVE, intrstat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) tasklet_schedule(&jme->linkch_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) goto out_reenable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) if (intrstat & INTR_TMINTR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) jwrite32(jme, JME_IEVE, INTR_TMINTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) tasklet_schedule(&jme->pcc_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) if (intrstat & (INTR_PCCTXTO | INTR_PCCTX)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) jwrite32(jme, JME_IEVE, INTR_PCCTXTO | INTR_PCCTX | INTR_TX0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) tasklet_schedule(&jme->txclean_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) jwrite32(jme, JME_IEVE, (intrstat & (INTR_PCCRX0TO |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) INTR_PCCRX0 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) INTR_RX0EMP)) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) INTR_RX0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) if (test_bit(JME_FLAG_POLL, &jme->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) if (intrstat & INTR_RX0EMP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) atomic_inc(&jme->rx_empty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) if (likely(JME_RX_SCHEDULE_PREP(jme))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) jme_polling_mode(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) JME_RX_SCHEDULE(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) if (intrstat & INTR_RX0EMP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) atomic_inc(&jme->rx_empty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) tasklet_hi_schedule(&jme->rxempty_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) } else if (intrstat & (INTR_PCCRX0TO | INTR_PCCRX0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) tasklet_hi_schedule(&jme->rxclean_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) out_reenable:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) * Re-enable interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) jwrite32f(jme, JME_IENS, INTR_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) static irqreturn_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) jme_intr(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) struct net_device *netdev = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) struct jme_adapter *jme = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) u32 intrstat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) intrstat = jread32(jme, JME_IEVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) * Check if it's really an interrupt for us
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) if (unlikely((intrstat & INTR_ENABLE) == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) * Check if the device still exists
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) if (unlikely(intrstat == ~((typeof(intrstat))0)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) jme_intr_msi(jme, intrstat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)
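/*
 * MSI variant: the vector is exclusive to this device, so the
 * "is it ours" and "is the device still present" checks needed for
 * the shared INTx line in jme_intr() can be skipped.
 */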
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) static irqreturn_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) jme_msi(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) struct net_device *netdev = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) struct jme_adapter *jme = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) u32 intrstat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) intrstat = jread32(jme, JME_IEVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) jme_intr_msi(jme, intrstat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)
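/*
 * Kick the link change tasklet indirectly: TMCSR_SWIT raises a
 * software interrupt, which jme_intr_msi() treats like a link change
 * event and schedules linkch_task.
 */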
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) jme_reset_link(struct jme_adapter *jme)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) jwrite32(jme, JME_TMCSR, TMCSR_SWIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) jme_restart_an(struct jme_adapter *jme)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) u32 bmcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) spin_lock_bh(&jme->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) spin_unlock_bh(&jme->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)
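/*
 * Try MSI first and fall back to a shared legacy (INTx) interrupt if
 * MSI cannot be enabled or the request fails.  MSI vectors are never
 * shared, so IRQF_SHARED is dropped in that case.
 */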
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) jme_request_irq(struct jme_adapter *jme)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) struct net_device *netdev = jme->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) irq_handler_t handler = jme_intr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) int irq_flags = IRQF_SHARED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) if (!pci_enable_msi(jme->pdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) set_bit(JME_FLAG_MSI, &jme->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) handler = jme_msi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) irq_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) rc = request_irq(jme->pdev->irq, handler, irq_flags, netdev->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) netdev_err(netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) "Unable to request %s interrupt (return: %d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) test_bit(JME_FLAG_MSI, &jme->flags) ? "MSI" : "INTx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) if (test_bit(JME_FLAG_MSI, &jme->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) pci_disable_msi(jme->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) clear_bit(JME_FLAG_MSI, &jme->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) netdev->irq = jme->pdev->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) jme_free_irq(struct jme_adapter *jme)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) free_irq(jme->pdev->irq, jme->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) if (test_bit(JME_FLAG_MSI, &jme->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) pci_disable_msi(jme->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) clear_bit(JME_FLAG_MSI, &jme->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) jme->dev->irq = jme->pdev->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661)
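/*
 * Newer chip revisions (see new_phy_power_ctrl()) gate PHY power not
 * only through BMCR_PDOWN but also via the JME_PHY_PWR register and a
 * vendor-specific PCI config word (PCI_PRIV_PE1); these two helpers
 * drive that extra path.
 */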
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) jme_new_phy_on(struct jme_adapter *jme)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) reg = jread32(jme, JME_PHY_PWR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) reg &= ~(PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) PHY_PWR_DWN2 | PHY_PWR_CLKSEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) jwrite32(jme, JME_PHY_PWR, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, &reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) reg &= ~PE1_GPREG0_PBG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) reg |= PE1_GPREG0_ENBG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) jme_new_phy_off(struct jme_adapter *jme)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) reg = jread32(jme, JME_PHY_PWR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) reg |= PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) PHY_PWR_DWN2 | PHY_PWR_CLKSEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) jwrite32(jme, JME_PHY_PWR, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, &reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) reg &= ~PE1_GPREG0_PBG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) reg |= PE1_GPREG0_PDD3COLD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693)
static inline void
jme_phy_on(struct jme_adapter *jme)
{
	u32 bmcr;

	bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
	bmcr &= ~BMCR_PDOWN;
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);

	if (new_phy_power_ctrl(jme->chip_main_rev))
		jme_new_phy_on(jme);
}

static inline void
jme_phy_off(struct jme_adapter *jme)
{
	u32 bmcr;

	bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
	bmcr |= BMCR_PDOWN;
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);

	if (new_phy_power_ctrl(jme->chip_main_rev))
		jme_new_phy_off(jme);
}

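/*
 * The JMicron PHY exposes extra "specification" registers indirectly:
 * the target register number plus a read/write opcode go into
 * JM_PHY_SPEC_ADDR_REG, and the payload is transferred through
 * JM_PHY_SPEC_DATA_REG.
 */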
static int
jme_phy_specreg_read(struct jme_adapter *jme, u32 specreg)
{
	u32 phy_addr;

	phy_addr = JM_PHY_SPEC_REG_READ | specreg;
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG,
		       phy_addr);
	return jme_mdio_read(jme->dev, jme->mii_if.phy_id,
			     JM_PHY_SPEC_DATA_REG);
}

static void
jme_phy_specreg_write(struct jme_adapter *jme, u32 ext_reg, u32 phy_data)
{
	u32 phy_addr;

	phy_addr = JM_PHY_SPEC_REG_WRITE | ext_reg;
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_DATA_REG,
		       phy_data);
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG,
		       phy_addr);
}

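/*
 * PHY calibration sequence: power-cycle the PHY, enter gigabit test
 * mode 1, latch and enable calibration through extended communication
 * register 2, give it 20ms to run, then clear the calibration bits and
 * leave test mode again.
 */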
static int
jme_phy_calibration(struct jme_adapter *jme)
{
	u32 ctrl1000, phy_data;

	jme_phy_off(jme);
	jme_phy_on(jme);
	/* Enable PHY test mode 1 */
	ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000);
	ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK;
	ctrl1000 |= PHY_GAD_TEST_MODE_1;
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000);

	phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG);
	phy_data &= ~JM_PHY_EXT_COMM_2_CALI_MODE_0;
	phy_data |= JM_PHY_EXT_COMM_2_CALI_LATCH |
		    JM_PHY_EXT_COMM_2_CALI_ENABLE;
	jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data);
	msleep(20);
	phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG);
	phy_data &= ~(JM_PHY_EXT_COMM_2_CALI_ENABLE |
		      JM_PHY_EXT_COMM_2_CALI_MODE_0 |
		      JM_PHY_EXT_COMM_2_CALI_LATCH);
	jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data);

	/* Disable PHY test mode */
	ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000);
	ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK;
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000);
	return 0;
}

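/*
 * Program chip-revision-specific values (magic numbers that vary per
 * device ID and main/sub revision) into PHY extended communication
 * registers 0 and 1, unless the shared NIC-control config byte already
 * flags the PHY EA setup as enabled.
 */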
static int
jme_phy_setEA(struct jme_adapter *jme)
{
	u32 phy_comm0 = 0, phy_comm1 = 0;
	u8 nic_ctrl;

	pci_read_config_byte(jme->pdev, PCI_PRIV_SHARE_NICCTRL, &nic_ctrl);
	if ((nic_ctrl & 0x3) == JME_FLAG_PHYEA_ENABLE)
		return 0;

	switch (jme->pdev->device) {
	case PCI_DEVICE_ID_JMICRON_JMC250:
		if (((jme->chip_main_rev == 5) &&
		     ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) ||
		      (jme->chip_sub_rev == 3))) ||
		    (jme->chip_main_rev >= 6)) {
			phy_comm0 = 0x008A;
			phy_comm1 = 0x4109;
		}
		if ((jme->chip_main_rev == 3) &&
		    ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2)))
			phy_comm0 = 0xE088;
		break;
	case PCI_DEVICE_ID_JMICRON_JMC260:
		if (((jme->chip_main_rev == 5) &&
		     ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) ||
		      (jme->chip_sub_rev == 3))) ||
		    (jme->chip_main_rev >= 6)) {
			phy_comm0 = 0x008A;
			phy_comm1 = 0x4109;
		}
		if ((jme->chip_main_rev == 3) &&
		    ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2)))
			phy_comm0 = 0xE088;
		if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 0))
			phy_comm0 = 0x608A;
		if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 2))
			phy_comm0 = 0x408A;
		break;
	default:
		return -ENODEV;
	}
	if (phy_comm0)
		jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_0_REG, phy_comm0);
	if (phy_comm1)
		jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_1_REG, phy_comm1);

	return 0;
}

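/*
 * ndo_open: clear the powersave/WoL state left by a previous close or
 * suspend, enable NAPI and the bottom-half tasklets, grab the IRQ,
 * then power the PHY back on and kick off link negotiation.
 */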
static int
jme_open(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int rc;

	jme_clear_pm_disable_wol(jme);
	JME_NAPI_ENABLE(jme);

	tasklet_setup(&jme->linkch_task, jme_link_change_tasklet);
	tasklet_setup(&jme->txclean_task, jme_tx_clean_tasklet);
	tasklet_setup(&jme->rxclean_task, jme_rx_clean_tasklet);
	tasklet_setup(&jme->rxempty_task, jme_rx_empty_tasklet);

	rc = jme_request_irq(jme);
	if (rc)
		goto err_out;

	jme_start_irq(jme);

	jme_phy_on(jme);
	if (test_bit(JME_FLAG_SSET, &jme->flags))
		jme_set_link_ksettings(netdev, &jme->old_cmd);
	else
		jme_reset_phy_processor(jme);
	jme_phy_calibration(jme);
	jme_phy_setEA(jme);
	jme_reset_link(jme);

	return 0;

err_out:
	netif_stop_queue(netdev);
	netif_carrier_off(netdev);
	return rc;
}

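/*
 * Force the link to 100Mbps half-duplex with autonegotiation off.
 * Used on the way down to suspend so a link partner can still raise
 * the line for wake-on-LAN.
 */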
static void
jme_set_100m_half(struct jme_adapter *jme)
{
	u32 bmcr, tmp;

	jme_phy_on(jme);
	bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
	tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 |
		       BMCR_SPEED1000 | BMCR_FULLDPLX);
	tmp |= BMCR_SPEED100;

	if (bmcr != tmp)
		jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, tmp);

	if (jme->fpgaver)
		jwrite32(jme, JME_GHC, GHC_SPEED_100M | GHC_LINK_POLL);
	else
		jwrite32(jme, JME_GHC, GHC_SPEED_100M);
}

#define JME_WAIT_LINK_TIME 2000 /* 2000ms */
static void
jme_wait_link(struct jme_adapter *jme)
{
	u32 phylink, to = JME_WAIT_LINK_TIME;

	msleep(1000);
	phylink = jme_linkstat_from_phy(jme);
	while (!(phylink & PHY_LINK_UP) && (to -= 10) > 0) {
		usleep_range(10000, 11000);
		phylink = jme_linkstat_from_phy(jme);
	}
}

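/*
 * Prepare the PHY for suspend: if any wake source is armed, drop to
 * 100Mbps half-duplex (waiting for the link to settle when link-change
 * wake is requested) and enable WoL; otherwise just power the PHY off.
 */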
static void
jme_powersave_phy(struct jme_adapter *jme)
{
	if (jme->reg_pmcs && device_may_wakeup(&jme->pdev->dev)) {
		jme_set_100m_half(jme);
		if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
			jme_wait_link(jme);
		jme_clear_pm_enable_wol(jme);
	} else {
		jme_phy_off(jme);
	}
}

static int
jme_close(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);

	jme_stop_irq(jme);
	jme_free_irq(jme);

	JME_NAPI_DISABLE(jme);

	tasklet_kill(&jme->linkch_task);
	tasklet_kill(&jme->txclean_task);
	tasklet_kill(&jme->rxclean_task);
	tasklet_kill(&jme->rxempty_task);

	jme_disable_rx_engine(jme);
	jme_disable_tx_engine(jme);
	jme_reset_mac_processor(jme);
	jme_free_rx_resources(jme);
	jme_free_tx_resources(jme);
	jme->phylink = 0;
	jme_phy_off(jme);

	return 0;
}

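/*
 * Reserve ring descriptors for one packet: nr_frags + 2, i.e. one
 * leading descriptor holding the per-packet flags, one for the linear
 * part of the skb, and one per paged fragment.  Returns the start
 * index, or -1 when the ring does not have enough free entries.
 */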
static int
jme_alloc_txdesc(struct jme_adapter *jme,
		 struct sk_buff *skb)
{
	struct jme_ring *txring = &(jme->txring[0]);
	int idx, nr_alloc, mask = jme->tx_ring_mask;

	idx = txring->next_to_use;
	nr_alloc = skb_shinfo(skb)->nr_frags + 2;

	if (unlikely(atomic_read(&txring->nr_free) < nr_alloc))
		return -1;

	atomic_sub(nr_alloc, &txring->nr_free);

	txring->next_to_use = (txring->next_to_use + nr_alloc) & mask;

	return idx;
}

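/*
 * DMA-map one buffer segment and describe it in a type-2 descriptor:
 * OWN hands the descriptor to the hardware, TXFLAG_64BIT is set when
 * the device does 64-bit (highmem-capable) addressing, and the mapping
 * is remembered in the buffer info so it can be unmapped on completion.
 */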
static int
jme_fill_tx_map(struct pci_dev *pdev,
		struct txdesc *txdesc,
		struct jme_buffer_info *txbi,
		struct page *page,
		u32 page_offset,
		u32 len,
		bool hidma)
{
	dma_addr_t dmaaddr;

	dmaaddr = dma_map_page(&pdev->dev, page, page_offset, len,
			       DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&pdev->dev, dmaaddr)))
		return -EINVAL;

	dma_sync_single_for_device(&pdev->dev, dmaaddr, len, DMA_TO_DEVICE);

	txdesc->dw[0] = 0;
	txdesc->dw[1] = 0;
	txdesc->desc2.flags = TXFLAG_OWN;
	txdesc->desc2.flags |= (hidma) ? TXFLAG_64BIT : 0;
	txdesc->desc2.datalen = cpu_to_le16(len);
	txdesc->desc2.bufaddrh = cpu_to_le32((__u64)dmaaddr >> 32);
	txdesc->desc2.bufaddrl = cpu_to_le32(
			(__u64)dmaaddr & 0xFFFFFFFFUL);

	txbi->mapping = dmaaddr;
	txbi->len = len;
	return 0;
}

static void jme_drop_tx_map(struct jme_adapter *jme, int startidx, int count)
{
	struct jme_ring *txring = &(jme->txring[0]);
	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
	int mask = jme->tx_ring_mask;
	int j;

	for (j = 0 ; j < count ; j++) {
		ctxbi = txbi + ((startidx + j + 2) & (mask));
		dma_unmap_page(&jme->pdev->dev,
			       ctxbi->mapping, ctxbi->len, DMA_TO_DEVICE);

		ctxbi->mapping = 0;
		ctxbi->len = 0;
	}
}

static int
jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
{
	struct jme_ring *txring = &(jme->txring[0]);
	struct txdesc *txdesc = txring->desc, *ctxdesc;
	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
	bool hidma = jme->dev->features & NETIF_F_HIGHDMA;
	int i, nr_frags = skb_shinfo(skb)->nr_frags;
	int mask = jme->tx_ring_mask;
	u32 len;
	int ret = 0;

	for (i = 0 ; i < nr_frags ; ++i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		ctxdesc = txdesc + ((idx + i + 2) & (mask));
		ctxbi = txbi + ((idx + i + 2) & (mask));

		ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
				      skb_frag_page(frag), skb_frag_off(frag),
				      skb_frag_size(frag), hidma);
		if (ret) {
			jme_drop_tx_map(jme, idx, i);
			goto out;
		}
	}

	len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
	ctxdesc = txdesc + ((idx + 1) & (mask));
	ctxbi = txbi + ((idx + 1) & (mask));
	ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
			      offset_in_page(skb->data), len, hidma);
	if (ret)
		jme_drop_tx_map(jme, idx, i);

out:
	return ret;
}

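/*
 * Set up TSO: store the MSS in the leading descriptor and pre-compute
 * the pseudo-header checksum the hardware expects.  Returns 0 when the
 * frame is a TSO frame, nonzero when the caller should fall back to
 * plain checksum offload.
 */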
static int
jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags)
{
	*mss = cpu_to_le16(skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT);
	if (*mss) {
		*flags |= TXFLAG_LSEN;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);

			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
		} else {
			tcp_v6_gso_csum_prep(skb);
		}

		return 0;
	}

	return 1;
}

static void
jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u8 ip_proto;

		switch (skb->protocol) {
		case htons(ETH_P_IP):
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case htons(ETH_P_IPV6):
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			ip_proto = 0;
			break;
		}

		switch (ip_proto) {
		case IPPROTO_TCP:
			*flags |= TXFLAG_TCPCS;
			break;
		case IPPROTO_UDP:
			*flags |= TXFLAG_UDPCS;
			break;
		default:
			netif_err(jme, tx_err, jme->dev,
				  "Unsupported upper layer protocol\n");
			break;
		}
	}
}

static inline void
jme_tx_vlan(struct sk_buff *skb, __le16 *vlan, u8 *flags)
{
	if (skb_vlan_tag_present(skb)) {
		*flags |= TXFLAG_TAGON;
		*vlan = cpu_to_le16(skb_vlan_tag_get(skb));
	}
}

static int
jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
{
	struct jme_ring *txring = &(jme->txring[0]);
	struct txdesc *txdesc;
	struct jme_buffer_info *txbi;
	u8 flags;
	int ret = 0;

	txdesc = (struct txdesc *)txring->desc + idx;
	txbi = txring->bufinf + idx;

	txdesc->dw[0] = 0;
	txdesc->dw[1] = 0;
	txdesc->dw[2] = 0;
	txdesc->dw[3] = 0;
	txdesc->desc1.pktsize = cpu_to_le16(skb->len);
	/*
	 * Set the OWN bit last: the kernel can queue packets faster than
	 * the NIC sends them, and the NIC must not try to send this
	 * descriptor before we tell it to start this TX queue.
	 * All other fields are already filled in correctly.
	 */
	wmb();
	flags = TXFLAG_OWN | TXFLAG_INT;
	/*
	 * Set checksum flags only when not doing TSO
	 */
	if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags))
		jme_tx_csum(jme, skb, &flags);
	jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags);
	ret = jme_map_tx_skb(jme, skb, idx);
	if (ret)
		return ret;

	txdesc->desc1.flags = flags;
	/*
	 * Set the TX buffer info only after telling the NIC to send,
	 * for better tx_clean timing
	 */
	wmb();
	txbi->nr_desc = skb_shinfo(skb)->nr_frags + 2;
	txbi->skb = skb;
	txbi->len = skb->len;
	txbi->start_xmit = jiffies;
	if (!txbi->start_xmit)
		txbi->start_xmit = (0UL-1);

	return 0;
}

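/*
 * Flow control for the TX path: pause the queue once a worst-case
 * packet (MAX_SKB_FRAGS + 2 descriptors) might no longer fit, wake it
 * again if the cleaner has already freed enough entries, and keep the
 * queue stopped if the oldest in-flight packet looks stuck for longer
 * than TX_TIMEOUT.
 */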
static void
jme_stop_queue_if_full(struct jme_adapter *jme)
{
	struct jme_ring *txring = &(jme->txring[0]);
	struct jme_buffer_info *txbi = txring->bufinf;
	int idx = atomic_read(&txring->next_to_clean);

	txbi += idx;

	smp_wmb();
	if (unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) {
		netif_stop_queue(jme->dev);
		netif_info(jme, tx_queued, jme->dev, "TX Queue Paused\n");
		smp_wmb();
		if (atomic_read(&txring->nr_free)
			>= (jme->tx_wake_threshold)) {
			netif_wake_queue(jme->dev);
			netif_info(jme, tx_queued, jme->dev, "TX Queue Fast Waked\n");
		}
	}

	if (unlikely(txbi->start_xmit &&
		     (jiffies - txbi->start_xmit) >= TX_TIMEOUT &&
		     txbi->skb)) {
		netif_stop_queue(jme->dev);
		netif_info(jme, tx_queued, jme->dev,
			   "TX Queue Stopped %d@%lu\n", idx, jiffies);
	}
}

/*
 * This function is already protected by netif_tx_lock()
 */
static netdev_tx_t
jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int idx;

	if (unlikely(skb_is_gso(skb) && skb_cow_head(skb, 0))) {
		dev_kfree_skb_any(skb);
		++(NET_STAT(jme).tx_dropped);
		return NETDEV_TX_OK;
	}

	idx = jme_alloc_txdesc(jme, skb);

	if (unlikely(idx < 0)) {
		netif_stop_queue(netdev);
		netif_err(jme, tx_err, jme->dev,
			  "BUG! Tx ring full when queue awake!\n");

		return NETDEV_TX_BUSY;
	}

	if (jme_fill_tx_desc(jme, skb, idx))
		return NETDEV_TX_OK;

	jwrite32(jme, JME_TXCS, jme->reg_txcs |
				TXCS_SELECT_QUEUE0 |
				TXCS_QUEUE0S |
				TXCS_ENABLE);

	tx_dbg(jme, "xmit: %d+%d@%lu\n",
	       idx, skb_shinfo(skb)->nr_frags + 2, jiffies);
	jme_stop_queue_if_full(jme);

	return NETDEV_TX_OK;
}

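/*
 * Load the station address into the unicast filter: the first four
 * bytes go to JME_RXUMA_LO and the remaining two to JME_RXUMA_HI.
 */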
static void
jme_set_unicastaddr(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 val;

	val = (netdev->dev_addr[3] & 0xff) << 24 |
	      (netdev->dev_addr[2] & 0xff) << 16 |
	      (netdev->dev_addr[1] & 0xff) <<  8 |
	      (netdev->dev_addr[0] & 0xff);
	jwrite32(jme, JME_RXUMA_LO, val);
	val = (netdev->dev_addr[5] & 0xff) << 8 |
	      (netdev->dev_addr[4] & 0xff);
	jwrite32(jme, JME_RXUMA_HI, val);
}

static int
jme_set_macaddr(struct net_device *netdev, void *p)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (netif_running(netdev))
		return -EBUSY;

	spin_lock_bh(&jme->macaddr_lock);
	eth_hw_addr_set(netdev, addr->sa_data);
	jme_set_unicastaddr(netdev);
	spin_unlock_bh(&jme->macaddr_lock);

	return 0;
}

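/*
 * Rebuild the RX mode: promiscuous and all-multi map straight to RXMCS
 * bits, otherwise a 64-bin multicast hash filter is built from the low
 * six bits of the Ethernet CRC of each address.
 */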
static void
jme_set_multi(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 mc_hash[2] = {};

	spin_lock_bh(&jme->rxmcs_lock);

	jme->reg_rxmcs |= RXMCS_BRDFRAME | RXMCS_UNIFRAME;

	if (netdev->flags & IFF_PROMISC) {
		jme->reg_rxmcs |= RXMCS_ALLFRAME;
	} else if (netdev->flags & IFF_ALLMULTI) {
		jme->reg_rxmcs |= RXMCS_ALLMULFRAME;
	} else if (netdev->flags & IFF_MULTICAST) {
		struct netdev_hw_addr *ha;
		int bit_nr;

		jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
		netdev_for_each_mc_addr(ha, netdev) {
			bit_nr = ether_crc(ETH_ALEN, ha->addr) & 0x3F;
			mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);
		}

		jwrite32(jme, JME_RXMCHT_LO, mc_hash[0]);
		jwrite32(jme, JME_RXMCHT_HI, mc_hash[1]);
	}

	wmb();
	jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);

	spin_unlock_bh(&jme->rxmcs_lock);
}

static int
jme_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	netdev->mtu = new_mtu;
	netdev_update_features(netdev);

	jme_restart_rx_engine(jme);
	jme_reset_link(jme);

	return 0;
}

static void
jme_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	jme->phylink = 0;
	jme_reset_phy_processor(jme);
	if (test_bit(JME_FLAG_SSET, &jme->flags))
		jme_set_link_ksettings(netdev, &jme->old_cmd);

	/*
	 * Force the link to be reset again
	 */
	jme_reset_link(jme);
}

static void
jme_get_drvinfo(struct net_device *netdev,
		struct ethtool_drvinfo *info)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
	strscpy(info->version, DRV_VERSION, sizeof(info->version));
	strscpy(info->bus_info, pci_name(jme->pdev), sizeof(info->bus_info));
}

static int
jme_get_regs_len(struct net_device *netdev)
{
	return JME_REG_LEN;
}

static void
mmapio_memcpy(struct jme_adapter *jme, u32 *p, u32 reg, int len)
{
	int i;

	for (i = 0 ; i < len ; i += 4)
		p[i >> 2] = jread32(jme, reg + i);
}

static void
mdio_memcpy(struct jme_adapter *jme, u32 *p, int reg_nr)
{
	int i;
	u16 *p16 = (u16 *)p;

	for (i = 0 ; i < reg_nr ; ++i)
		p16[i] = jme_mdio_read(jme->dev, jme->mii_if.phy_id, i);
}

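/*
 * ethtool register dump layout: the MAC, PHY, MISC and RSS MMIO blocks
 * are copied out at 0x100-byte strides, followed by the MDIO registers
 * read through the MII interface.
 */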
static void
jme_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 *p32 = (u32 *)p;

	memset(p, 0xFF, JME_REG_LEN);

	regs->version = 1;
	mmapio_memcpy(jme, p32, JME_MAC, JME_MAC_LEN);

	p32 += 0x100 >> 2;
	mmapio_memcpy(jme, p32, JME_PHY, JME_PHY_LEN);

	p32 += 0x100 >> 2;
	mmapio_memcpy(jme, p32, JME_MISC, JME_MISC_LEN);

	p32 += 0x100 >> 2;
	mmapio_memcpy(jme, p32, JME_RSS, JME_RSS_LEN);

	p32 += 0x100 >> 2;
	mdio_memcpy(jme, p32, JME_PHY_REG_NR);
}

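/*
 * Report interrupt coalescing.  With JME_FLAG_POLL set the driver is
 * in NAPI polling mode and adaptive RX coalescing is off; otherwise
 * the currently active PCC profile (P1/P2/P3) is reported.
 */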
static int
jme_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	ecmd->tx_coalesce_usecs = PCC_TX_TO;
	ecmd->tx_max_coalesced_frames = PCC_TX_CNT;

	if (test_bit(JME_FLAG_POLL, &jme->flags)) {
		ecmd->use_adaptive_rx_coalesce = false;
		ecmd->rx_coalesce_usecs = 0;
		ecmd->rx_max_coalesced_frames = 0;
		return 0;
	}

	ecmd->use_adaptive_rx_coalesce = true;

	switch (jme->dpi.cur) {
	case PCC_P1:
		ecmd->rx_coalesce_usecs = PCC_P1_TO;
		ecmd->rx_max_coalesced_frames = PCC_P1_CNT;
		break;
	case PCC_P2:
		ecmd->rx_coalesce_usecs = PCC_P2_TO;
		ecmd->rx_max_coalesced_frames = PCC_P2_CNT;
		break;
	case PCC_P3:
		ecmd->rx_coalesce_usecs = PCC_P3_TO;
		ecmd->rx_max_coalesced_frames = PCC_P3_CNT;
		break;
	default:
		break;
	}

	return 0;
}

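/*
 * Switch between the two RX schemes: adaptive coalescing (dynamic PCC,
 * packets delivered via netif_rx) and NAPI polling (JME_FLAG_POLL set,
 * packets delivered via netif_receive_skb).  Only allowed while the
 * interface is down.
 */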
static int
jme_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	struct dynpcc_info *dpi = &(jme->dpi);

	if (netif_running(netdev))
		return -EBUSY;

	if (ecmd->use_adaptive_rx_coalesce &&
	    test_bit(JME_FLAG_POLL, &jme->flags)) {
		clear_bit(JME_FLAG_POLL, &jme->flags);
		jme->jme_rx = netif_rx;
		dpi->cur		= PCC_P1;
		dpi->attempt		= PCC_P1;
		dpi->cnt		= 0;
		jme_set_rx_pcc(jme, PCC_P1);
		jme_interrupt_mode(jme);
	} else if (!(ecmd->use_adaptive_rx_coalesce) &&
		   !(test_bit(JME_FLAG_POLL, &jme->flags))) {
		set_bit(JME_FLAG_POLL, &jme->flags);
		jme->jme_rx = netif_receive_skb;
		jme_interrupt_mode(jme);
	}

	return 0;
}

static void
jme_get_pauseparam(struct net_device *netdev,
		   struct ethtool_pauseparam *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 val;

	ecmd->tx_pause = (jme->reg_txpfc & TXPFC_PF_EN) != 0;
	ecmd->rx_pause = (jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0;

	spin_lock_bh(&jme->phy_lock);
	val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
	spin_unlock_bh(&jme->phy_lock);

	ecmd->autoneg =
		(val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0;
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) jme_set_pauseparam(struct net_device *netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) struct ethtool_pauseparam *ecmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) struct jme_adapter *jme = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) if (((jme->reg_txpfc & TXPFC_PF_EN) != 0) ^
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) (ecmd->tx_pause != 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) if (ecmd->tx_pause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) jme->reg_txpfc |= TXPFC_PF_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) jme->reg_txpfc &= ~TXPFC_PF_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) jwrite32(jme, JME_TXPFC, jme->reg_txpfc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) spin_lock_bh(&jme->rxmcs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) if (((jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0) ^
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) (ecmd->rx_pause != 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) if (ecmd->rx_pause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) jme->reg_rxmcs |= RXMCS_FLOWCTRL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) jme->reg_rxmcs &= ~RXMCS_FLOWCTRL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) spin_unlock_bh(&jme->rxmcs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) spin_lock_bh(&jme->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) if (((val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0) ^
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) (ecmd->autoneg != 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) if (ecmd->autoneg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) val |= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) val &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) jme_mdio_write(jme->dev, jme->mii_if.phy_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) MII_ADVERTISE, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) spin_unlock_bh(&jme->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535)
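/*
 * Wake-on-LAN: only magic-packet (PMCS_MFEN) and PHY link-change
 * (PMCS_LFEN | PMCS_LREN) wake events are supported. set_wol only
 * caches the choice in jme->reg_pmcs; the PMCS register itself is
 * programmed later, e.g. by the power-management paths.
 */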
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) jme_get_wol(struct net_device *netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) struct ethtool_wolinfo *wol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) struct jme_adapter *jme = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) wol->supported = WAKE_MAGIC | WAKE_PHY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) wol->wolopts = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) wol->wolopts |= WAKE_PHY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) if (jme->reg_pmcs & PMCS_MFEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) wol->wolopts |= WAKE_MAGIC;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) jme_set_wol(struct net_device *netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) struct ethtool_wolinfo *wol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) struct jme_adapter *jme = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) if (wol->wolopts & (WAKE_MAGICSECURE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) WAKE_UCAST |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) WAKE_MCAST |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) WAKE_BCAST |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) WAKE_ARP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) jme->reg_pmcs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) if (wol->wolopts & WAKE_PHY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) jme->reg_pmcs |= PMCS_LFEN | PMCS_LREN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) if (wol->wolopts & WAKE_MAGIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) jme->reg_pmcs |= PMCS_MFEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) jme_get_link_ksettings(struct net_device *netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) struct ethtool_link_ksettings *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) struct jme_adapter *jme = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) spin_lock_bh(&jme->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) mii_ethtool_get_link_ksettings(&jme->mii_if, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) spin_unlock_bh(&jme->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) jme_set_link_ksettings(struct net_device *netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) const struct ethtool_link_ksettings *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) struct jme_adapter *jme = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) int rc, fdc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596)
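	/*
	 * 1000BASE-T requires autonegotiation (IEEE 802.3 clause 40),
	 * so refuse any attempt to force gigabit speed.
	 */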
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) if (cmd->base.speed == SPEED_1000 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) cmd->base.autoneg != AUTONEG_ENABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600)
	/*
	 * Check if the user changed only the duplex while the media is
	 * forced. The hardware would not generate a link change
	 * interrupt for that, so the link is reset manually below.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) if (jme->mii_if.force_media &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) cmd->base.autoneg != AUTONEG_ENABLE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) (jme->mii_if.full_duplex != cmd->base.duplex))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) fdc = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) spin_lock_bh(&jme->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) rc = mii_ethtool_set_link_ksettings(&jme->mii_if, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) spin_unlock_bh(&jme->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) if (!rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) if (fdc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) jme_reset_link(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) jme->old_cmd = *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) set_bit(JME_FLAG_SSET, &jme->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) jme_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) struct jme_adapter *jme = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) struct mii_ioctl_data *mii_data = if_mii(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) unsigned int duplex_chg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631)
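	/*
	 * Mirror the jme_set_link_ksettings() policy for raw MII writes:
	 * reject a BMCR value that forces 1000 Mbps without reset or
	 * autonegotiation enabled.
	 */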
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) if (cmd == SIOCSMIIREG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) u16 val = mii_data->val_in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) if (!(val & (BMCR_RESET|BMCR_ANENABLE)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) (val & BMCR_SPEED1000))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) spin_lock_bh(&jme->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) rc = generic_mii_ioctl(&jme->mii_if, mii_data, cmd, &duplex_chg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) spin_unlock_bh(&jme->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) if (!rc && (cmd == SIOCSMIIREG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) if (duplex_chg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) jme_reset_link(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) jme_get_link_ksettings(netdev, &jme->old_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) set_bit(JME_FLAG_SSET, &jme->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) static u32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) jme_get_link(struct net_device *netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) struct jme_adapter *jme = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) return jread32(jme, JME_PHY_LINK) & PHY_LINK_UP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) static u32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) jme_get_msglevel(struct net_device *netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) struct jme_adapter *jme = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) return jme->msg_enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) jme_set_msglevel(struct net_device *netdev, u32 value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) struct jme_adapter *jme = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) jme->msg_enable = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673)
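/*
 * The checksum and TSO offload engines appear to handle only
 * normal-sized frames, so both are dropped once the MTU exceeds
 * 1900 bytes.
 */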
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) static netdev_features_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) jme_fix_features(struct net_device *netdev, netdev_features_t features)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) if (netdev->mtu > 1900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) features &= ~(NETIF_F_ALL_TSO | NETIF_F_CSUM_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) return features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) jme_set_features(struct net_device *netdev, netdev_features_t features)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) struct jme_adapter *jme = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) spin_lock_bh(&jme->rxmcs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) if (features & NETIF_F_RXCSUM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) jme->reg_rxmcs |= RXMCS_CHECKSUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) jme->reg_rxmcs &= ~RXMCS_CHECKSUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) spin_unlock_bh(&jme->rxmcs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) #ifdef CONFIG_NET_POLL_CONTROLLER
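/*
 * Netpoll support simply runs the normal interrupt handler with local
 * interrupts disabled, as netconsole and friends expect.
 */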
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) static void jme_netpoll(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) jme_intr(dev->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) jme_nway_reset(struct net_device *netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) struct jme_adapter *jme = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) jme_restart_an(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716)
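/*
 * EEPROM bytes are accessed through the SMBus interface: wait (up to
 * JME_SMB_BUSY_TIMEOUT ms) for SMBCSR_BUSY to clear, post a read
 * command for the given address via SMBINTF, wait for the HWCMD bit to
 * self-clear, then pull the data byte out of the SMBINTF_HWDATR field.
 * 0xFF is returned if the bus stays busy.
 */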
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) static u8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) jme_smb_read(struct jme_adapter *jme, unsigned int addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) int to;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) val = jread32(jme, JME_SMBCSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) to = JME_SMB_BUSY_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) while ((val & SMBCSR_BUSY) && --to) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) msleep(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) val = jread32(jme, JME_SMBCSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) if (!to) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) return 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) jwrite32(jme, JME_SMBINTF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) ((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) SMBINTF_HWRWN_READ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) SMBINTF_HWCMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) val = jread32(jme, JME_SMBINTF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) to = JME_SMB_BUSY_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) while ((val & SMBINTF_HWCMD) && --to) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) msleep(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) val = jread32(jme, JME_SMBINTF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) if (!to) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) return 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) return (val & SMBINTF_HWDATR) >> SMBINTF_HWDATR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752)
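/*
 * Write-side counterpart of jme_smb_read(); the trailing mdelay(2)
 * presumably gives the EEPROM time to complete its internal write
 * cycle.
 */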
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) jme_smb_write(struct jme_adapter *jme, unsigned int addr, u8 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) int to;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) val = jread32(jme, JME_SMBCSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) to = JME_SMB_BUSY_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) while ((val & SMBCSR_BUSY) && --to) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) msleep(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) val = jread32(jme, JME_SMBCSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) if (!to) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) jwrite32(jme, JME_SMBINTF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) ((data << SMBINTF_HWDATW_SHIFT) & SMBINTF_HWDATW) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) ((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) SMBINTF_HWRWN_WRITE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) SMBINTF_HWCMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) val = jread32(jme, JME_SMBINTF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) to = JME_SMB_BUSY_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) while ((val & SMBINTF_HWCMD) && --to) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) msleep(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) val = jread32(jme, JME_SMBINTF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) if (!to) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) mdelay(2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789)
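/*
 * An EEPROM is reported only when SMBCSR_EEPROMD indicates one was
 * detected; the exposed size is the fixed JME_SMB_LEN window.
 */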
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) jme_get_eeprom_len(struct net_device *netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) struct jme_adapter *jme = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) val = jread32(jme, JME_SMBCSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) return (val & SMBCSR_EEPROMD) ? JME_SMB_LEN : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) jme_get_eeprom(struct net_device *netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) struct ethtool_eeprom *eeprom, u8 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) struct jme_adapter *jme = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) int i, offset = eeprom->offset, len = eeprom->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805)
	/*
	 * The ethtool core has already validated offset/len against
	 * get_eeprom_len() for us.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) eeprom->magic = JME_EEPROM_MAGIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) for (i = 0 ; i < len ; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) data[i] = jme_smb_read(jme, i + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) jme_set_eeprom(struct net_device *netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) struct ethtool_eeprom *eeprom, u8 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) struct jme_adapter *jme = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) int i, offset = eeprom->offset, len = eeprom->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) if (eeprom->magic != JME_EEPROM_MAGIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825)
	/*
	 * The ethtool core has already validated offset/len against
	 * get_eeprom_len() for us.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) for (i = 0 ; i < len ; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) jme_smb_write(jme, i + offset, data[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) static const struct ethtool_ops jme_ethtool_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) ETHTOOL_COALESCE_MAX_FRAMES |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) .get_drvinfo = jme_get_drvinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) .get_regs_len = jme_get_regs_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) .get_regs = jme_get_regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) .get_coalesce = jme_get_coalesce,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) .set_coalesce = jme_set_coalesce,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) .get_pauseparam = jme_get_pauseparam,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) .set_pauseparam = jme_set_pauseparam,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) .get_wol = jme_get_wol,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) .set_wol = jme_set_wol,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) .get_link = jme_get_link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) .get_msglevel = jme_get_msglevel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) .set_msglevel = jme_set_msglevel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) .nway_reset = jme_nway_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) .get_eeprom_len = jme_get_eeprom_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) .get_eeprom = jme_get_eeprom,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) .set_eeprom = jme_set_eeprom,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) .get_link_ksettings = jme_get_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) .set_link_ksettings = jme_set_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858)
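/*
 * Negotiate the widest usable DMA mask: the JMC250 is tried at 64 and
 * then 40 bits, everything else falls through to 32 bits. Returns 1
 * when a >32-bit mask was accepted (DAC usable), 0 for plain 32-bit
 * DMA, and -1 when no mask could be set. Note this still uses the
 * legacy pci_set_*dma_mask() wrappers; current kernels would use
 * dma_set_mask_and_coherent() instead.
 */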
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) jme_pci_dma64(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) !pci_set_dma_mask(pdev, DMA_BIT_MASK(40)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878)
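/*
 * PHY register 26 is vendor specific; what bit 12 enables is not
 * documented here, it is simply set as part of PHY bring-up on
 * non-FPGA parts.
 */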
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) jme_phy_init(struct jme_adapter *jme)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) u16 reg26;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) reg26 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 26);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) jme_mdio_write(jme->dev, jme->mii_if.phy_id, 26, reg26 | 0x1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887)
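/*
 * Decode the CHIPMODE register: the FPGA version field is non-zero
 * only on FPGA-based prototypes, and the chip revision splits into a
 * main revision (low nibble) and a sub revision (high nibble).
 */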
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) jme_check_hw_ver(struct jme_adapter *jme)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) u32 chipmode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) chipmode = jread32(jme, JME_CHIPMODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) jme->chip_main_rev = jme->chiprev & 0xF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) jme->chip_sub_rev = (jme->chiprev >> 4) & 0xF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) static const struct net_device_ops jme_netdev_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) .ndo_open = jme_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) .ndo_stop = jme_close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) .ndo_validate_addr = eth_validate_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) .ndo_do_ioctl = jme_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) .ndo_start_xmit = jme_start_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) .ndo_set_mac_address = jme_set_macaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) .ndo_set_rx_mode = jme_set_multi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) .ndo_change_mtu = jme_change_mtu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) .ndo_tx_timeout = jme_tx_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) .ndo_fix_features = jme_fix_features,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) .ndo_set_features = jme_set_features,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) #ifdef CONFIG_NET_POLL_CONTROLLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) .ndo_poll_controller = jme_netpoll,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) jme_init_one(struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) const struct pci_device_id *ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) int rc = 0, using_dac, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) struct net_device *netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) struct jme_adapter *jme;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) u16 bmcr, bmsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) u32 apmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) * set up PCI device basics
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) PCIE_LINK_STATE_CLKPM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) rc = pci_enable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) pr_err("Cannot enable PCI device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) using_dac = jme_pci_dma64(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) if (using_dac < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) pr_err("Cannot set PCI DMA Mask\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) goto err_out_disable_pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) pr_err("No PCI resource region found\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) goto err_out_disable_pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) rc = pci_request_regions(pdev, DRV_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) pr_err("Cannot obtain PCI resource region\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) goto err_out_disable_pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) pci_set_master(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) * alloc and init net device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) netdev = alloc_etherdev(sizeof(*jme));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) if (!netdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) goto err_out_release_regions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) netdev->netdev_ops = &jme_netdev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) netdev->ethtool_ops = &jme_ethtool_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) netdev->watchdog_timeo = TX_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) netdev->hw_features = NETIF_F_IP_CSUM |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) NETIF_F_IPV6_CSUM |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) NETIF_F_SG |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) NETIF_F_TSO |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) NETIF_F_TSO6 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) NETIF_F_RXCSUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) netdev->features = NETIF_F_IP_CSUM |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) NETIF_F_IPV6_CSUM |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) NETIF_F_SG |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) NETIF_F_TSO |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) NETIF_F_TSO6 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) NETIF_F_HW_VLAN_CTAG_TX |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) NETIF_F_HW_VLAN_CTAG_RX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) if (using_dac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) netdev->features |= NETIF_F_HIGHDMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987)
	/* MTU range: 1280 - 9202 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) netdev->min_mtu = IPV6_MIN_MTU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) netdev->max_mtu = MAX_ETHERNET_JUMBO_PACKET_SIZE - ETH_HLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) SET_NETDEV_DEV(netdev, &pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) pci_set_drvdata(pdev, netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) * init adapter info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) jme = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) jme->pdev = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) jme->dev = netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) jme->jme_rx = netif_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) jme->old_mtu = netdev->mtu = 1500;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) jme->phylink = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) jme->tx_ring_size = 1 << 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) jme->tx_ring_mask = jme->tx_ring_size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) jme->tx_wake_threshold = 1 << 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) jme->rx_ring_size = 1 << 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) jme->rx_ring_mask = jme->rx_ring_size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) jme->msg_enable = JME_DEF_MSG_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) jme->regs = ioremap(pci_resource_start(pdev, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) pci_resource_len(pdev, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) if (!(jme->regs)) {
		pr_err("Cannot map PCI resource region\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) goto err_out_free_netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) if (no_pseudohp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) apmc = jread32(jme, JME_APMC) & ~JME_APMC_PSEUDO_HP_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) jwrite32(jme, JME_APMC, apmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) } else if (force_pseudohp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) apmc = jread32(jme, JME_APMC) | JME_APMC_PSEUDO_HP_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) jwrite32(jme, JME_APMC, apmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025)
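	/*
	 * NETIF_NAPI_SET comes from jme.h and expands to
	 * netif_napi_add(); its expansion supplies the trailing
	 * semicolon, hence none here.
	 */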
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, NAPI_POLL_WEIGHT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) spin_lock_init(&jme->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) spin_lock_init(&jme->macaddr_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) spin_lock_init(&jme->rxmcs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) atomic_set(&jme->link_changing, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) atomic_set(&jme->rx_cleaning, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) atomic_set(&jme->tx_cleaning, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) atomic_set(&jme->rx_empty, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) tasklet_setup(&jme->pcc_task, jme_pcc_tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) jme->dpi.cur = PCC_P1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) jme->reg_ghc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) jme->reg_rxcs = RXCS_DEFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) jme->reg_rxmcs = RXMCS_DEFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) jme->reg_txpfc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) jme->reg_pmcs = PMCS_MFEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) jme->reg_gpreg1 = GPREG1_DEFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) if (jme->reg_rxmcs & RXMCS_CHECKSUM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) netdev->features |= NETIF_F_RXCSUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) * Get Max Read Req Size from PCI Config Space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) pci_read_config_byte(pdev, PCI_DCSR_MRRS, &jme->mrrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) jme->mrrs &= PCI_DCSR_MRRS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) switch (jme->mrrs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) case MRRS_128B:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) case MRRS_256B:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066)
	/*
	 * The hardware version must be read out before
	 * reset_mac_processor is called.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) jme_check_hw_ver(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) jme->mii_if.dev = netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) if (jme->fpgaver) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) jme->mii_if.phy_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) for (i = 1 ; i < 32 ; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) bmcr = jme_mdio_read(netdev, i, MII_BMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) bmsr = jme_mdio_read(netdev, i, MII_BMSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) if (bmcr != 0xFFFFU && (bmcr != 0 || bmsr != 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) jme->mii_if.phy_id = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) if (!jme->mii_if.phy_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) rc = -EIO;
			pr_err("Cannot find phy_id\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) goto err_out_unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) jme->reg_ghc |= GHC_LINK_POLL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) jme->mii_if.phy_id = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) }
	jme->mii_if.supports_gmii =
		(pdev->device == PCI_DEVICE_ID_JMICRON_JMC250);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) jme->mii_if.phy_id_mask = 0x1F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) jme->mii_if.reg_num_mask = 0x1F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) jme->mii_if.mdio_read = jme_mdio_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) jme->mii_if.mdio_write = jme_mdio_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) jme_clear_pm_disable_wol(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) device_init_wakeup(&pdev->dev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) jme_set_phyfifo_5level(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) jme->pcirev = pdev->revision;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) if (!jme->fpgaver)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) jme_phy_init(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) jme_phy_off(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) * Reset MAC processor and reload EEPROM for MAC Address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) jme_reset_mac_processor(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) rc = jme_reload_eeprom(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) if (rc) {
		pr_err("Cannot reload EEPROM to read the MAC address\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) goto err_out_unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) jme_load_macaddr(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) * Tell stack that we are not ready to work until open()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) netif_carrier_off(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) rc = register_netdev(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) pr_err("Cannot register net device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) goto err_out_unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) netif_info(jme, probe, jme->dev, "%s%s chiprev:%x pcirev:%x macaddr:%pM\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) "JMC250 Gigabit Ethernet" :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) "JMC260 Fast Ethernet" : "Unknown",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) (jme->fpgaver != 0) ? " (FPGA)" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) (jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) jme->pcirev, netdev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) err_out_unmap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) iounmap(jme->regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) err_out_free_netdev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) free_netdev(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) err_out_release_regions:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) pci_release_regions(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) err_out_disable_pdev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) jme_remove_one(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) struct net_device *netdev = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) struct jme_adapter *jme = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) unregister_netdev(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) iounmap(jme->regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) free_netdev(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) pci_release_regions(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) pci_disable_device(pdev);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169)
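/*
 * On shutdown, jme_powersave_phy() is expected to drop the PHY into
 * its power-saving state with the configured wake events still armed,
 * and PME is enabled so those events can wake the machine.
 */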
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) jme_shutdown(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) struct net_device *netdev = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) struct jme_adapter *jme = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) jme_powersave_phy(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) pci_pme_active(pdev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) #ifdef CONFIG_PM_SLEEP
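/*
 * Suspend: detach from the stack, quiesce the IRQ and the cleanup
 * tasklets, tear down the RX/TX engines and rings if the link was up,
 * then drop the PHY into its power-saving (wake-capable) state.
 */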
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) jme_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) struct net_device *netdev = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) struct jme_adapter *jme = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) if (!netif_running(netdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) atomic_dec(&jme->link_changing);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) netif_device_detach(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) netif_stop_queue(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) jme_stop_irq(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) tasklet_disable(&jme->txclean_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) tasklet_disable(&jme->rxclean_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) tasklet_disable(&jme->rxempty_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) if (netif_carrier_ok(netdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) if (test_bit(JME_FLAG_POLL, &jme->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) jme_polling_mode(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) jme_stop_pcc_timer(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) jme_disable_rx_engine(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) jme_disable_tx_engine(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) jme_reset_mac_processor(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) jme_free_rx_resources(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) jme_free_tx_resources(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) netif_carrier_off(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) jme->phylink = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) tasklet_enable(&jme->txclean_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) tasklet_enable(&jme->rxclean_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) tasklet_enable(&jme->rxempty_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) jme_powersave_phy(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222)
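/*
 * Resume: undo the power-save/WoL state, power the PHY back up,
 * restore any user-saved link settings (JME_FLAG_SSET), recalibrate
 * the PHY, and kick a link reset before re-enabling the IRQ.
 */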
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) jme_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) struct net_device *netdev = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) struct jme_adapter *jme = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) if (!netif_running(netdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) jme_clear_pm_disable_wol(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) jme_phy_on(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) if (test_bit(JME_FLAG_SSET, &jme->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) jme_set_link_ksettings(netdev, &jme->old_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) jme_reset_phy_processor(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) jme_phy_calibration(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) jme_phy_setEA(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) netif_device_attach(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) atomic_inc(&jme->link_changing);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) jme_reset_link(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) jme_start_irq(jme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) static SIMPLE_DEV_PM_OPS(jme_pm_ops, jme_suspend, jme_resume);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) #define JME_PM_OPS (&jme_pm_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) #define JME_PM_OPS NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) static const struct pci_device_id jme_pci_tbl[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC250) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC260) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) static struct pci_driver jme_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) .name = DRV_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) .id_table = jme_pci_tbl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) .probe = jme_init_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) .remove = jme_remove_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) .shutdown = jme_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) .driver.pm = JME_PM_OPS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) static int __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) jme_init_module(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) pr_info("JMicron JMC2XX ethernet driver version %s\n", DRV_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) return pci_register_driver(&jme_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) static void __exit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) jme_cleanup_module(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) pci_unregister_driver(&jme_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) module_init(jme_init_module);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) module_exit(jme_cleanup_module);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) MODULE_AUTHOR("Guo-Fu Tseng <cooldavid@cooldavid.org>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) MODULE_VERSION(DRV_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) MODULE_DEVICE_TABLE(pci, jme_pci_tbl);