^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright (C) 2001,2002,2003,2004 Broadcom Corporation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Copyright (c) 2006, 2007 Maciej W. Rozycki
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * This driver is designed for the Broadcom SiByte SOC built-in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Ethernet controllers. Written by Mitch Lichtenberg at Broadcom Corp.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * Updated to the driver model and the PHY abstraction layer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * by Maciej W. Rozycki.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/bug.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/string.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/timer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/ioport.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/netdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/etherdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/skbuff.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/bitops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <linux/err.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <linux/ethtool.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <linux/mii.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <linux/phy.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <linux/prefetch.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include <asm/cache.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #include <asm/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #include <asm/processor.h> /* Processor type for cache alignment. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) /* Operational parameters that usually are not changed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #define CONFIG_SBMAC_COALESCE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) /* Time in jiffies before concluding the transmitter is hung. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #define TX_TIMEOUT (2*HZ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) MODULE_AUTHOR("Mitch Lichtenberg (Broadcom Corp.)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) MODULE_DESCRIPTION("Broadcom SiByte SOC GB Ethernet driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) /* A few user-configurable values which may be modified when a driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) module is loaded. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50)
/* Debug level: 0 = quiet, 1 = normal messages (default), up to 7 = verbose. */
static int debug = 1;
module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "Debug messages");

#ifdef CONFIG_SBMAC_COALESCE
/*
 * Interrupt-coalescing parameters (read-only module params): an RX/TX
 * interrupt fires after this many packets or after the timeout value,
 * whichever comes first.  These seed the per-channel sbdma_int_pktcnt /
 * sbdma_int_timeout fields.
 */
static int int_pktcnt_tx = 255;
module_param(int_pktcnt_tx, int, 0444);
MODULE_PARM_DESC(int_pktcnt_tx, "TX packet count");

static int int_timeout_tx = 255;
module_param(int_timeout_tx, int, 0444);
MODULE_PARM_DESC(int_timeout_tx, "TX timeout value");

static int int_pktcnt_rx = 64;
module_param(int_pktcnt_rx, int, 0444);
MODULE_PARM_DESC(int_pktcnt_rx, "RX packet count");

static int int_timeout_rx = 64;
module_param(int_timeout_rx, int, 0444);
MODULE_PARM_DESC(int_timeout_rx, "RX timeout value");
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) #include <asm/sibyte/board.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) #include <asm/sibyte/sb1250.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) #if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) #include <asm/sibyte/bcm1480_regs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) #include <asm/sibyte/bcm1480_int.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) #define R_MAC_DMA_OODPKTLOST_RX R_MAC_DMA_OODPKTLOST
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) #elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) #include <asm/sibyte/sb1250_regs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) #include <asm/sibyte/sb1250_int.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) #error invalid SiByte MAC configuration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) #include <asm/sibyte/sb1250_scd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) #include <asm/sibyte/sb1250_mac.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) #include <asm/sibyte/sb1250_dma.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) #if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) #define UNIT_INT(n) (K_BCM1480_INT_MAC_0 + ((n) * 2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) #elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) #define UNIT_INT(n) (K_INT_MAC_0 + (n))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) #error invalid SiByte MAC configuration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) #ifdef K_INT_PHY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) #define SBMAC_PHY_INT K_INT_PHY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) #define SBMAC_PHY_INT PHY_POLL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) /**********************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) * Simple types
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) ********************************************************************* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107)
/* Link speed, encoded with the ethtool SPEED_* values; 0 = not yet set. */
enum sbmac_speed {
	sbmac_speed_none = 0,
	sbmac_speed_10 = SPEED_10,
	sbmac_speed_100 = SPEED_100,
	sbmac_speed_1000 = SPEED_1000,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114)
/* Duplex mode, encoded with the ethtool DUPLEX_* values; -1 = not yet set. */
enum sbmac_duplex {
	sbmac_duplex_none = -1,
	sbmac_duplex_half = DUPLEX_HALF,
	sbmac_duplex_full = DUPLEX_FULL,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120)
/*
 * Flow-control mode.  sbmac_fc_frame presumably means 802.3x PAUSE
 * frames and collision/carrier the half-duplex backpressure schemes --
 * the exact hardware meaning is set where sbm_fc is programmed, not
 * visible in this chunk (TODO confirm against sbmac_set_duplex).
 */
enum sbmac_fc {
	sbmac_fc_none,
	sbmac_fc_disabled,
	sbmac_fc_frame,
	sbmac_fc_collision,
	sbmac_fc_carrier,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128)
/* Lifecycle state of a MAC channel (see sbmac_set_channel_state). */
enum sbmac_state {
	sbmac_state_uninit,
	sbmac_state_off,
	sbmac_state_on,
	sbmac_state_broken,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) /**********************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) * Macros
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) ********************************************************************* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141)
/*
 * Advance ring pointer member 'f' of DMA controller 'd' by one
 * descriptor, wrapping back to the start of the table at the end.
 * NOTE: evaluates 'd' and 'f' more than once -- do not pass
 * expressions with side effects.
 */
#define SBDMA_NEXTBUF(d,f) ((((d)->f+1) == (d)->sbdma_dscrtable_end) ? \
			  (d)->sbdma_dscrtable : (d)->f+1)


/* Number of cache lines needed to hold x bytes. */
#define NUMCACHEBLKS(x) DIV_ROUND_UP(x, SMP_CACHE_BYTES)

/* Descriptor ring sizes (entries per TX/RX ring). */
#define SBMAC_MAX_TXDESCR 256
#define SBMAC_MAX_RXDESCR 256

/* Largest Ethernet frame handled (standard 1518-byte frames). */
#define ENET_PACKET_SIZE 1518
/*#define ENET_PACKET_SIZE 9216 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) /**********************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) * DMA Descriptor structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) ********************************************************************* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157)
/*
 * One hardware DMA descriptor: two 64-bit words whose bit layout is
 * defined by the SB1250 DMA engine (see sb1250_dma.h).
 */
struct sbdmadscr {
	uint64_t dscr_a;
	uint64_t dscr_b;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) /**********************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) * DMA Controller structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) ********************************************************************* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166)
/* Per-channel DMA controller state (one instance each for TX and RX). */
struct sbmacdma {

	/*
	 * Identification of the channel and the memory-mapped
	 * registers associated with it.
	 */
	struct sbmac_softc *sbdma_eth;	    /* back pointer to owning MAC */
	int sbdma_channel;		    /* channel number */
	int sbdma_txdir;		    /* direction (1=transmit) */
	int sbdma_maxdescr;		    /* total # of descriptors in ring */
#ifdef CONFIG_SBMAC_COALESCE
	int sbdma_int_pktcnt;		    /* # descriptors rx/tx before
					       interrupt */
	int sbdma_int_timeout;		    /* # usec rx/tx interrupt */
#endif
	void __iomem *sbdma_config0;	    /* DMA config register 0 */
	void __iomem *sbdma_config1;	    /* DMA config register 1 */
	void __iomem *sbdma_dscrbase;	    /* descriptor base address */
	void __iomem *sbdma_dscrcnt;	    /* descriptor count register */
	void __iomem *sbdma_curdscr;	    /* current descriptor address */
	void __iomem *sbdma_oodpktlost;	    /* pkt drop (rx only) */

	/*
	 * Software-side maintenance of the descriptor ring.
	 */
	void *sbdma_dscrtable_unaligned;    /* original unaligned allocation
					       backing sbdma_dscrtable */
	struct sbdmadscr *sbdma_dscrtable;  /* base of (aligned) descriptor
					       table */
	struct sbdmadscr *sbdma_dscrtable_end;
					    /* one past the last descriptor;
					       SBDMA_NEXTBUF wraps here */
	struct sk_buff **sbdma_ctxtable;    /* context table, one skb pointer
					       per descriptor */
	dma_addr_t sbdma_dscrtable_phys;    /* physical addr of the table */
	struct sbdmadscr *sbdma_addptr;	    /* next dscr for sw to add */
	struct sbdmadscr *sbdma_remptr;	    /* next dscr for sw to remove */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) /**********************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) * Ethernet softc structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) ********************************************************************* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218)
/* Per-device driver state: one instance per on-chip MAC. */
struct sbmac_softc {

	/*
	 * Linux-specific things
	 */
	struct net_device *sbm_dev;	/* pointer to linux device */
	struct napi_struct napi;	/* NAPI polling context */
	struct phy_device *phy_dev;	/* the associated PHY device */
	struct mii_bus *mii_bus;	/* the MII bus */
	spinlock_t sbm_lock;		/* spin lock */
	int sbm_devflags;		/* current device flags */

	/*
	 * Controller-specific things: base address plus cached
	 * pointers to the individual memory-mapped registers.
	 */
	void __iomem *sbm_base;		/* MAC's base address */
	enum sbmac_state sbm_state;	/* current state */

	void __iomem *sbm_macenable;	/* MAC Enable Register */
	void __iomem *sbm_maccfg;	/* MAC Config Register */
	void __iomem *sbm_fifocfg;	/* FIFO Config Register */
	void __iomem *sbm_framecfg;	/* Frame Config Register */
	void __iomem *sbm_rxfilter;	/* Receive Filter Register */
	void __iomem *sbm_isr;		/* Interrupt Status Register */
	void __iomem *sbm_imr;		/* Interrupt Mask Register */
	void __iomem *sbm_mdio;		/* MDIO Register */

	enum sbmac_speed sbm_speed;	/* current speed */
	enum sbmac_duplex sbm_duplex;	/* current duplex */
	enum sbmac_fc sbm_fc;		/* cur. flow control setting */
	int sbm_pause;			/* current pause setting */
	int sbm_link;			/* current link state */

	unsigned char sbm_hwaddr[ETH_ALEN];	/* station MAC address */

	struct sbmacdma sbm_txdma;	/* only channel 0 for now */
	struct sbmacdma sbm_rxdma;	/* RX DMA channel state */
	int rx_hw_checksum;		/* presumably non-zero when RX h/w
					   checksumming is enabled -- TODO
					   confirm where it is set */
	int sbe_idx;			/* NOTE(review): purpose not evident
					   from this chunk; verify at probe */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) /**********************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) * Externs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) ********************************************************************* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) /**********************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) * Prototypes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) ********************************************************************* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) int txrx, int maxdescr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) static void sbdma_channel_start(struct sbmacdma *d, int rxtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) struct sk_buff *m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) static void sbdma_emptyring(struct sbmacdma *d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) int work_to_do, int poll);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) int poll);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) static int sbmac_initctx(struct sbmac_softc *s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) static void sbmac_channel_start(struct sbmac_softc *s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) static void sbmac_channel_stop(struct sbmac_softc *s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) static enum sbmac_state sbmac_set_channel_state(struct sbmac_softc *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) enum sbmac_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) static void sbmac_promiscuous_mode(struct sbmac_softc *sc, int onoff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) static uint64_t sbmac_addr2reg(unsigned char *ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) static irqreturn_t sbmac_intr(int irq, void *dev_instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) static netdev_tx_t sbmac_start_tx(struct sk_buff *skb, struct net_device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) static void sbmac_setmulti(struct sbmac_softc *sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) static int sbmac_init(struct platform_device *pldev, long long base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) static int sbmac_set_speed(struct sbmac_softc *s, enum sbmac_speed speed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) static int sbmac_set_duplex(struct sbmac_softc *s, enum sbmac_duplex duplex,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) enum sbmac_fc fc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) static int sbmac_open(struct net_device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) static void sbmac_tx_timeout (struct net_device *dev, unsigned int txqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) static void sbmac_set_rx_mode(struct net_device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) static int sbmac_close(struct net_device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) static int sbmac_poll(struct napi_struct *napi, int budget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) static void sbmac_mii_poll(struct net_device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) static int sbmac_mii_probe(struct net_device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) static void sbmac_mii_sync(void __iomem *sbm_mdio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) static void sbmac_mii_senddata(void __iomem *sbm_mdio, unsigned int data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) int bitcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) static int sbmac_mii_read(struct mii_bus *bus, int phyaddr, int regidx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) static int sbmac_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) u16 val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) /**********************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) * Globals
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) ********************************************************************* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) static char sbmac_string[] = "sb1250-mac";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) static char sbmac_mdio_string[] = "sb1250-mac-mdio";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) /**********************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) * MDIO constants
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) ********************************************************************* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326)
/*
 * Bit patterns clocked onto the MDIO wire; each is sent as a 2-bit
 * field by sbmac_mii_read/sbmac_mii_write (IEEE 802.3 clause 22
 * management-frame format).
 */
#define MII_COMMAND_START	0x01	/* ST: start of frame (binary 01) */
#define MII_COMMAND_READ	0x02	/* OP: read opcode (binary 10) */
#define MII_COMMAND_WRITE	0x01	/* OP: write opcode (binary 01) */
#define MII_COMMAND_ACK		0x02	/* TA: turnaround (binary 10) */

#define M_MAC_MDIO_DIR_OUTPUT	0	/* for clarity */

#define ENABLE 	1
#define DISABLE	0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) /**********************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) * SBMAC_MII_SYNC(sbm_mdio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) * Synchronize with the MII - send a pattern of bits to the MII
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) * that will guarantee that it is ready to accept a command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) * Input parameters:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) * sbm_mdio - address of the MAC's MDIO register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) * nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) ********************************************************************* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) static void sbmac_mii_sync(void __iomem *sbm_mdio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) int cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) uint64_t bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) int mac_mdio_genc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) bits = M_MAC_MDIO_DIR_OUTPUT | M_MAC_MDIO_OUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) __raw_writeq(bits | mac_mdio_genc, sbm_mdio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) for (cnt = 0; cnt < 32; cnt++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) __raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, sbm_mdio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) __raw_writeq(bits | mac_mdio_genc, sbm_mdio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) /**********************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) * SBMAC_MII_SENDDATA(sbm_mdio, data, bitcnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) * Send some bits to the MII. The bits to be sent are right-
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) * justified in the 'data' parameter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) * Input parameters:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) * sbm_mdio - address of the MAC's MDIO register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) * data - data to send
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) * bitcnt - number of bits to send
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) ********************************************************************* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) static void sbmac_mii_senddata(void __iomem *sbm_mdio, unsigned int data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) int bitcnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) uint64_t bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) unsigned int curmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) int mac_mdio_genc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) bits = M_MAC_MDIO_DIR_OUTPUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) __raw_writeq(bits | mac_mdio_genc, sbm_mdio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) curmask = 1 << (bitcnt - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) for (i = 0; i < bitcnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) if (data & curmask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) bits |= M_MAC_MDIO_OUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) else bits &= ~M_MAC_MDIO_OUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) __raw_writeq(bits | mac_mdio_genc, sbm_mdio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) __raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, sbm_mdio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) __raw_writeq(bits | mac_mdio_genc, sbm_mdio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) curmask >>= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) /**********************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) * SBMAC_MII_READ(bus, phyaddr, regidx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) * Read a PHY register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) * Input parameters:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) * bus - MDIO bus handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) * phyaddr - PHY's address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) * regnum - index of register to read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) * value read, or 0xffff if an error occurred.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) ********************************************************************* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) static int sbmac_mii_read(struct mii_bus *bus, int phyaddr, int regidx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) struct sbmac_softc *sc = (struct sbmac_softc *)bus->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) void __iomem *sbm_mdio = sc->sbm_mdio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) int regval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) int mac_mdio_genc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) * Synchronize ourselves so that the PHY knows the next
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) * thing coming down is a command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) sbmac_mii_sync(sbm_mdio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) * Send the data to the PHY. The sequence is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) * a "start" command (2 bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) * a "read" command (2 bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) * the PHY addr (5 bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) * the register index (5 bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) sbmac_mii_senddata(sbm_mdio, MII_COMMAND_START, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) sbmac_mii_senddata(sbm_mdio, MII_COMMAND_READ, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) sbmac_mii_senddata(sbm_mdio, phyaddr, 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) sbmac_mii_senddata(sbm_mdio, regidx, 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) * Switch the port around without a clock transition.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) * Send out a clock pulse to signal we want the status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) __raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) sbm_mdio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) * If an error occurred, the PHY will signal '1' back
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) error = __raw_readq(sbm_mdio) & M_MAC_MDIO_IN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) * Issue an 'idle' clock pulse, but keep the direction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) * the same.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) __raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) sbm_mdio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) regval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) for (idx = 0; idx < 16; idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) regval <<= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) if (error == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) if (__raw_readq(sbm_mdio) & M_MAC_MDIO_IN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) regval |= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) __raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) sbm_mdio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) /* Switch back to output */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) __raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, sbm_mdio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) if (error == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) return regval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) return 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) /**********************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) * SBMAC_MII_WRITE(bus, phyaddr, regidx, regval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) * Write a value to a PHY register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) * Input parameters:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) * bus - MDIO bus handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) * phyaddr - PHY to use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) * regidx - register within the PHY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) * regval - data to write to register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) * 0 for success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) ********************************************************************* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) static int sbmac_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) u16 regval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) struct sbmac_softc *sc = (struct sbmac_softc *)bus->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) void __iomem *sbm_mdio = sc->sbm_mdio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) int mac_mdio_genc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) sbmac_mii_sync(sbm_mdio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) sbmac_mii_senddata(sbm_mdio, MII_COMMAND_START, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) sbmac_mii_senddata(sbm_mdio, MII_COMMAND_WRITE, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) sbmac_mii_senddata(sbm_mdio, phyaddr, 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) sbmac_mii_senddata(sbm_mdio, regidx, 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) sbmac_mii_senddata(sbm_mdio, MII_COMMAND_ACK, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) sbmac_mii_senddata(sbm_mdio, regval, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) __raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, sbm_mdio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) /**********************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) * SBDMA_INITCTX(d,s,chan,txrx,maxdescr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) * Initialize a DMA channel context. Since there are potentially
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) * eight DMA channels per MAC, it's nice to do this in a standard
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) * way.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) * Input parameters:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) * d - struct sbmacdma (DMA channel context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) * s - struct sbmac_softc (pointer to a MAC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) * chan - channel number (0..1 right now)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) * txrx - Identifies DMA_TX or DMA_RX for channel direction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) * maxdescr - number of descriptors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) * nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) ********************************************************************* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) int txrx, int maxdescr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) #ifdef CONFIG_SBMAC_COALESCE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) int int_pktcnt, int_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) * Save away interesting stuff in the structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) d->sbdma_eth = s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) d->sbdma_channel = chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) d->sbdma_txdir = txrx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) /* RMON clearing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) s->sbe_idx =(s->sbm_base - A_MAC_BASE_0)/MAC_SPACING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) __raw_writeq(0, s->sbm_base + R_MAC_RMON_COLLISIONS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) __raw_writeq(0, s->sbm_base + R_MAC_RMON_LATE_COL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) __raw_writeq(0, s->sbm_base + R_MAC_RMON_EX_COL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) __raw_writeq(0, s->sbm_base + R_MAC_RMON_FCS_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_ABORT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_BAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_GOOD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_RUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_OVERSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_MCAST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_BCAST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_BAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_GOOD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_RUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_OVERSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_FCS_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_LENGTH_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_CODE_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_ALIGN_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) * initialize register pointers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) d->sbdma_config0 =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CONFIG0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) d->sbdma_config1 =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CONFIG1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) d->sbdma_dscrbase =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_DSCR_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) d->sbdma_dscrcnt =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_DSCR_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) d->sbdma_curdscr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CUR_DSCRADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) if (d->sbdma_txdir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) d->sbdma_oodpktlost = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) d->sbdma_oodpktlost =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_OODPKTLOST_RX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) * Allocate memory for the ring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) d->sbdma_maxdescr = maxdescr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) d->sbdma_dscrtable_unaligned = kcalloc(d->sbdma_maxdescr + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) sizeof(*d->sbdma_dscrtable),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) * The descriptor table must be aligned to at least 16 bytes or the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) * MAC will corrupt it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) d->sbdma_dscrtable = (struct sbdmadscr *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) ALIGN((unsigned long)d->sbdma_dscrtable_unaligned,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) sizeof(*d->sbdma_dscrtable));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) d->sbdma_dscrtable_end = d->sbdma_dscrtable + d->sbdma_maxdescr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) d->sbdma_dscrtable_phys = virt_to_phys(d->sbdma_dscrtable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) * And context table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) d->sbdma_ctxtable = kcalloc(d->sbdma_maxdescr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) sizeof(*d->sbdma_ctxtable), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) #ifdef CONFIG_SBMAC_COALESCE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) * Setup Rx/Tx DMA coalescing defaults
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) int_pktcnt = (txrx == DMA_TX) ? int_pktcnt_tx : int_pktcnt_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) if ( int_pktcnt ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) d->sbdma_int_pktcnt = int_pktcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) d->sbdma_int_pktcnt = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) int_timeout = (txrx == DMA_TX) ? int_timeout_tx : int_timeout_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) if ( int_timeout ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) d->sbdma_int_timeout = int_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) d->sbdma_int_timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) /**********************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) * SBDMA_CHANNEL_START(d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) * Initialize the hardware registers for a DMA channel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) * Input parameters:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) * d - DMA channel to init (context must be previously init'd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) * rxtx - DMA_RX or DMA_TX depending on what type of channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) * nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) ********************************************************************* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) static void sbdma_channel_start(struct sbmacdma *d, int rxtx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) * Turn on the DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) #ifdef CONFIG_SBMAC_COALESCE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) __raw_writeq(V_DMA_INT_TIMEOUT(d->sbdma_int_timeout) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) 0, d->sbdma_config1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) __raw_writeq(M_DMA_EOP_INT_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) V_DMA_RINGSZ(d->sbdma_maxdescr) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) V_DMA_INT_PKTCNT(d->sbdma_int_pktcnt) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) 0, d->sbdma_config0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) __raw_writeq(0, d->sbdma_config1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) __raw_writeq(V_DMA_RINGSZ(d->sbdma_maxdescr) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) 0, d->sbdma_config0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) __raw_writeq(d->sbdma_dscrtable_phys, d->sbdma_dscrbase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) * Initialize ring pointers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) d->sbdma_addptr = d->sbdma_dscrtable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) d->sbdma_remptr = d->sbdma_dscrtable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) /**********************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) * SBDMA_CHANNEL_STOP(d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) * Initialize the hardware registers for a DMA channel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) * Input parameters:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) * d - DMA channel to init (context must be previously init'd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) * nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) ********************************************************************* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) static void sbdma_channel_stop(struct sbmacdma *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) * Turn off the DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) __raw_writeq(0, d->sbdma_config1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) __raw_writeq(0, d->sbdma_dscrbase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) __raw_writeq(0, d->sbdma_config0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) * Zero ring pointers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) d->sbdma_addptr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) d->sbdma_remptr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) static inline void sbdma_align_skb(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) unsigned int power2, unsigned int offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) unsigned char *addr = skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) unsigned char *newaddr = PTR_ALIGN(addr, power2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) skb_reserve(skb, newaddr - addr + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) /**********************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) * SBDMA_ADD_RCVBUFFER(d,sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) * Add a buffer to the specified DMA channel. For receive channels,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) * this queues a buffer for inbound packets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) * Input parameters:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) * sc - softc structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) * d - DMA channel descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) * sb - sk_buff to add, or NULL if we should allocate one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) *
 *  Return value:
 * 	   0 if buffer added successfully
 * 	   -ENOSPC if the ring is full, -ENOBUFS if allocation failed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) ********************************************************************* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) struct sk_buff *sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) struct net_device *dev = sc->sbm_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) struct sbdmadscr *dsc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) struct sbdmadscr *nextdsc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) struct sk_buff *sb_new = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) int pktsize = ENET_PACKET_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) /* get pointer to our current place in the ring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) dsc = d->sbdma_addptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) nextdsc = SBDMA_NEXTBUF(d,sbdma_addptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) * figure out if the ring is full - if the next descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) * is the same as the one that we're going to remove from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) * the ring, the ring is full
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) if (nextdsc == d->sbdma_remptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) * Allocate a sk_buff if we don't already have one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) * If we do have an sk_buff, reset it so that it's empty.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) * Note: sk_buffs don't seem to be guaranteed to have any sort
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) * of alignment when they are allocated. Therefore, allocate enough
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) * extra space to make sure that:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) * 1. the data does not start in the middle of a cache line.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) * 2. The data does not end in the middle of a cache line
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) * 3. The buffer can be aligned such that the IP addresses are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) * naturally aligned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) * Remember, the SOCs MAC writes whole cache lines at a time,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) * without reading the old contents first. So, if the sk_buff's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) * data portion starts in the middle of a cache line, the SOC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) * DMA will trash the beginning (and ending) portions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) if (sb == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) sb_new = netdev_alloc_skb(dev, ENET_PACKET_SIZE +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) SMP_CACHE_BYTES * 2 +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) NET_IP_ALIGN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) if (sb_new == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) return -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) sbdma_align_skb(sb_new, SMP_CACHE_BYTES, NET_IP_ALIGN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) sb_new = sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) * nothing special to reinit buffer, it's already aligned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) * and sb->data already points to a good place.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) * fill in the descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) #ifdef CONFIG_SBMAC_COALESCE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) * Do not interrupt per DMA transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) dsc->dscr_a = virt_to_phys(sb_new->data) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) | 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) dsc->dscr_a = virt_to_phys(sb_new->data) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) M_DMA_DSCRA_INTERRUPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) /* receiving: no options */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) dsc->dscr_b = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) * fill in the context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) d->sbdma_ctxtable[dsc-d->sbdma_dscrtable] = sb_new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) * point at next packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) d->sbdma_addptr = nextdsc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) * Give the buffer to the DMA engine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) __raw_writeq(1, d->sbdma_dscrcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) return 0; /* we did it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) /**********************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) * SBDMA_ADD_TXBUFFER(d,sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) * Add a transmit buffer to the specified DMA channel, causing a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) * transmit to start.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) * Input parameters:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) * d - DMA channel descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) * sb - sk_buff to add
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) * 0 transmit queued successfully
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) * otherwise error code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) ********************************************************************* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) struct sbdmadscr *dsc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) struct sbdmadscr *nextdsc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) uint64_t phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) uint64_t ncb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) int length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) /* get pointer to our current place in the ring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) dsc = d->sbdma_addptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) nextdsc = SBDMA_NEXTBUF(d,sbdma_addptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) * figure out if the ring is full - if the next descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) * is the same as the one that we're going to remove from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) * the ring, the ring is full
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) if (nextdsc == d->sbdma_remptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) * Under Linux, it's not necessary to copy/coalesce buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) * like it is on NetBSD. We think they're all contiguous,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) * but that may not be true for GBE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) length = sb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) * fill in the descriptor. Note that the number of cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) * blocks in the descriptor is the number of blocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) * *spanned*, so we need to add in the offset (if any)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) * while doing the calculation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) phys = virt_to_phys(sb->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) ncb = NUMCACHEBLKS(length+(phys & (SMP_CACHE_BYTES - 1)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) dsc->dscr_a = phys |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) V_DMA_DSCRA_A_SIZE(ncb) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) #ifndef CONFIG_SBMAC_COALESCE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) M_DMA_DSCRA_INTERRUPT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) M_DMA_ETHTX_SOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) /* transmitting: set outbound options and length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) dsc->dscr_b = V_DMA_DSCRB_OPTIONS(K_DMA_ETHTX_APPENDCRC_APPENDPAD) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) V_DMA_DSCRB_PKT_SIZE(length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) * fill in the context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) d->sbdma_ctxtable[dsc-d->sbdma_dscrtable] = sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) * point at next packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) d->sbdma_addptr = nextdsc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) * Give the buffer to the DMA engine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) __raw_writeq(1, d->sbdma_dscrcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) return 0; /* we did it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) /**********************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) * SBDMA_EMPTYRING(d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) * Free all allocated sk_buffs on the specified DMA channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) * Input parameters:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) * d - DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) * nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) ********************************************************************* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) static void sbdma_emptyring(struct sbmacdma *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) struct sk_buff *sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) for (idx = 0; idx < d->sbdma_maxdescr; idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) sb = d->sbdma_ctxtable[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) if (sb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) dev_kfree_skb(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) d->sbdma_ctxtable[idx] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) /**********************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) * SBDMA_FILLRING(d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) * Fill the specified DMA channel (must be receive channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) * with sk_buffs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) * Input parameters:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) * sc - softc structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) * d - DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) * nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) ********************************************************************* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) for (idx = 0; idx < SBMAC_MAX_RXDESCR - 1; idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) if (sbdma_add_rcvbuffer(sc, d, NULL) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) #ifdef CONFIG_NET_POLL_CONTROLLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) static void sbmac_netpoll(struct net_device *netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) struct sbmac_softc *sc = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) int irq = sc->sbm_dev->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) __raw_writeq(0, sc->sbm_imr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) sbmac_intr(irq, netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) #ifdef CONFIG_SBMAC_COALESCE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) sc->sbm_imr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) __raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), sc->sbm_imr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) /**********************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) * SBDMA_RX_PROCESS(sc,d,work_to_do,poll)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) * Process "completed" receive buffers on the specified DMA channel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) * Input parameters:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) * sc - softc structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) * d - DMA channel context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) * work_to_do - no. of packets to process before enabling interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) * again (for NAPI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) * poll - 1: using polling (for NAPI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) * nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) ********************************************************************* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) int work_to_do, int poll)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) struct net_device *dev = sc->sbm_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) int curidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) int hwidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) struct sbdmadscr *dsc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) struct sk_buff *sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) int work_done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) int dropped = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) prefetch(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) /* Check if the HW dropped any frames */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) dev->stats.rx_fifo_errors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) += __raw_readq(sc->sbm_rxdma.sbdma_oodpktlost) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) __raw_writeq(0, sc->sbm_rxdma.sbdma_oodpktlost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) while (work_to_do-- > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) * figure out where we are (as an index) and where
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) * the hardware is (also as an index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) * This could be done faster if (for example) the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) * descriptor table was page-aligned and contiguous in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) * both virtual and physical memory -- you could then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) * just compare the low-order bits of the virtual address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) * (sbdma_remptr) and the physical address (sbdma_curdscr CSR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) dsc = d->sbdma_remptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) curidx = dsc - d->sbdma_dscrtable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) prefetch(dsc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) prefetch(&d->sbdma_ctxtable[curidx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) hwidx = ((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) d->sbdma_dscrtable_phys) /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) sizeof(*d->sbdma_dscrtable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) * If they're the same, that means we've processed all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) * of the descriptors up to (but not including) the one that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) * the hardware is working on right now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) if (curidx == hwidx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) * Otherwise, get the packet's sk_buff ptr back
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) sb = d->sbdma_ctxtable[curidx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) d->sbdma_ctxtable[curidx] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) len = (int)G_DMA_DSCRB_PKT_SIZE(dsc->dscr_b) - 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) * Check packet status. If good, process it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) * If not, silently drop it and put it back on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) * receive ring.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) if (likely (!(dsc->dscr_a & M_DMA_ETHRX_BAD))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) * Add a new buffer to replace the old one. If we fail
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) * to allocate a buffer, we're going to drop this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) * packet and put it right back on the receive ring.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) if (unlikely(sbdma_add_rcvbuffer(sc, d, NULL) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) -ENOBUFS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) dev->stats.rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) /* Re-add old buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) sbdma_add_rcvbuffer(sc, d, sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) /* No point in continuing at the moment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) printk(KERN_ERR "dropped packet (1)\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) * Set length into the packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) skb_put(sb,len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) * Buffer has been replaced on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) * receive ring. Pass the buffer to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) * the kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) sb->protocol = eth_type_trans(sb,d->sbdma_eth->sbm_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) /* Check hw IPv4/TCP checksum if supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) if (sc->rx_hw_checksum == ENABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) if (!((dsc->dscr_a) & M_DMA_ETHRX_BADIP4CS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) !((dsc->dscr_a) & M_DMA_ETHRX_BADTCPCS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) sb->ip_summed = CHECKSUM_UNNECESSARY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) /* don't need to set sb->csum */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) skb_checksum_none_assert(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) prefetch(sb->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) prefetch((const void *)(((char *)sb->data)+32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) if (poll)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) dropped = netif_receive_skb(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) dropped = netif_rx(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) if (dropped == NET_RX_DROP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) dev->stats.rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) dev->stats.rx_bytes += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) dev->stats.rx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) * Packet was mangled somehow. Just drop it and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) * put it back on the receive ring.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) dev->stats.rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) sbdma_add_rcvbuffer(sc, d, sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) * .. and advance to the next buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) work_done++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) if (!poll) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) work_to_do = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) goto again; /* collect fifo drop statistics again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) return work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) /**********************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) * SBDMA_TX_PROCESS(sc,d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) * Process "completed" transmit buffers on the specified DMA channel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) * This is normally called within the interrupt service routine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) * Note that this isn't really ideal for priority channels, since
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) * it processes all of the packets on a given channel before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) * returning.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) * Input parameters:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) * sc - softc structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) * d - DMA channel context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) * poll - 1: using polling (for NAPI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) * nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) ********************************************************************* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) int poll)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) struct net_device *dev = sc->sbm_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) int curidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) int hwidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) struct sbdmadscr *dsc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) struct sk_buff *sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) int packets_handled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) spin_lock_irqsave(&(sc->sbm_lock), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) if (d->sbdma_remptr == d->sbdma_addptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) goto end_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) hwidx = ((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) d->sbdma_dscrtable_phys) / sizeof(*d->sbdma_dscrtable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) * figure out where we are (as an index) and where
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) * the hardware is (also as an index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) * This could be done faster if (for example) the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) * descriptor table was page-aligned and contiguous in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) * both virtual and physical memory -- you could then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) * just compare the low-order bits of the virtual address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) * (sbdma_remptr) and the physical address (sbdma_curdscr CSR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) curidx = d->sbdma_remptr - d->sbdma_dscrtable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) * If they're the same, that means we've processed all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) * of the descriptors up to (but not including) the one that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) * the hardware is working on right now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) if (curidx == hwidx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) * Otherwise, get the packet's sk_buff ptr back
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) dsc = &(d->sbdma_dscrtable[curidx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) sb = d->sbdma_ctxtable[curidx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) d->sbdma_ctxtable[curidx] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) * Stats
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) dev->stats.tx_bytes += sb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) dev->stats.tx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) * for transmits, we just free buffers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) dev_consume_skb_irq(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) * .. and advance to the next buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) packets_handled++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) * Decide if we should wake up the protocol or not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) * Other drivers seem to do this when we reach a low
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) * watermark on the transmit queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) if (packets_handled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) netif_wake_queue(d->sbdma_eth->sbm_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) end_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) spin_unlock_irqrestore(&(sc->sbm_lock), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) /**********************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) * SBMAC_INITCTX(s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) * Initialize an Ethernet context structure - this is called
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) * once per MAC on the 1250. Memory is allocated here, so don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) * call it again from inside the ioctl routines that bring the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) * interface up/down
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) * Input parameters:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) * s - sbmac context structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) * 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) ********************************************************************* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) static int sbmac_initctx(struct sbmac_softc *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) * figure out the addresses of some ports
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) s->sbm_macenable = s->sbm_base + R_MAC_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) s->sbm_maccfg = s->sbm_base + R_MAC_CFG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) s->sbm_fifocfg = s->sbm_base + R_MAC_THRSH_CFG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) s->sbm_framecfg = s->sbm_base + R_MAC_FRAMECFG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) s->sbm_rxfilter = s->sbm_base + R_MAC_ADFILTER_CFG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) s->sbm_isr = s->sbm_base + R_MAC_STATUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) s->sbm_imr = s->sbm_base + R_MAC_INT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) s->sbm_mdio = s->sbm_base + R_MAC_MDIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) * Initialize the DMA channels. Right now, only one per MAC is used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) * Note: Only do this _once_, as it allocates memory from the kernel!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) sbdma_initctx(&(s->sbm_txdma),s,0,DMA_TX,SBMAC_MAX_TXDESCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) sbdma_initctx(&(s->sbm_rxdma),s,0,DMA_RX,SBMAC_MAX_RXDESCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) * initial state is OFF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) s->sbm_state = sbmac_state_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) static void sbdma_uninitctx(struct sbmacdma *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) kfree(d->sbdma_dscrtable_unaligned);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) d->sbdma_dscrtable_unaligned = d->sbdma_dscrtable = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) kfree(d->sbdma_ctxtable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) d->sbdma_ctxtable = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) static void sbmac_uninitctx(struct sbmac_softc *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) sbdma_uninitctx(&(sc->sbm_txdma));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) sbdma_uninitctx(&(sc->sbm_rxdma));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) /**********************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) * SBMAC_CHANNEL_START(s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) * Start packet processing on this MAC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) * Input parameters:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) * s - sbmac structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) * nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) ********************************************************************* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) static void sbmac_channel_start(struct sbmac_softc *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) uint64_t reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) void __iomem *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) uint64_t cfg,fifo,framecfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) int idx, th_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) * Don't do this if running
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) if (s->sbm_state == sbmac_state_on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) * Bring the controller out of reset, but leave it off.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) __raw_writeq(0, s->sbm_macenable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) * Ignore all received packets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) __raw_writeq(0, s->sbm_rxfilter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) * Calculate values for various control registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) cfg = M_MAC_RETRY_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) M_MAC_TX_HOLD_SOP_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) V_MAC_TX_PAUSE_CNT_16K |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) M_MAC_AP_STAT_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) M_MAC_FAST_SYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) M_MAC_SS_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) * Be sure that RD_THRSH+WR_THRSH <= 32 for pass1 pars
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) * and make sure that RD_THRSH + WR_THRSH <=128 for pass2 and above
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) * Use a larger RD_THRSH for gigabit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) if (soc_type == K_SYS_SOC_TYPE_BCM1250 && periph_rev < 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) th_value = 28;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) th_value = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) fifo = V_MAC_TX_WR_THRSH(4) | /* Must be '4' or '8' */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) ((s->sbm_speed == sbmac_speed_1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) ? V_MAC_TX_RD_THRSH(th_value) : V_MAC_TX_RD_THRSH(4)) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) V_MAC_TX_RL_THRSH(4) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) V_MAC_RX_PL_THRSH(4) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) V_MAC_RX_RD_THRSH(4) | /* Must be '4' */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) V_MAC_RX_RL_THRSH(8) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) framecfg = V_MAC_MIN_FRAMESZ_DEFAULT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) V_MAC_MAX_FRAMESZ_DEFAULT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) V_MAC_BACKOFF_SEL(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) * Clear out the hash address map
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) port = s->sbm_base + R_MAC_HASH_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) for (idx = 0; idx < MAC_HASH_COUNT; idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) __raw_writeq(0, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) port += sizeof(uint64_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) * Clear out the exact-match table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) port = s->sbm_base + R_MAC_ADDR_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) for (idx = 0; idx < MAC_ADDR_COUNT; idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) __raw_writeq(0, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) port += sizeof(uint64_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) * Clear out the DMA Channel mapping table registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) port = s->sbm_base + R_MAC_CHUP0_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) __raw_writeq(0, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) port += sizeof(uint64_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) port = s->sbm_base + R_MAC_CHLO0_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) __raw_writeq(0, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) port += sizeof(uint64_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) * Program the hardware address. It goes into the hardware-address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) * register as well as the first filter register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) reg = sbmac_addr2reg(s->sbm_hwaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) port = s->sbm_base + R_MAC_ADDR_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) __raw_writeq(reg, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) port = s->sbm_base + R_MAC_ETHERNET_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) __raw_writeq(reg, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) * Set the receive filter for no packets, and write values
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) * to the various config registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) __raw_writeq(0, s->sbm_rxfilter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) __raw_writeq(0, s->sbm_imr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) __raw_writeq(framecfg, s->sbm_framecfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) __raw_writeq(fifo, s->sbm_fifocfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) __raw_writeq(cfg, s->sbm_maccfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) * Initialize DMA channels (rings should be ok now)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) sbdma_channel_start(&(s->sbm_rxdma), DMA_RX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) sbdma_channel_start(&(s->sbm_txdma), DMA_TX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) * Configure the speed, duplex, and flow control
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) sbmac_set_speed(s,s->sbm_speed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) sbmac_set_duplex(s,s->sbm_duplex,s->sbm_fc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) * Fill the receive ring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) sbdma_fillring(s, &(s->sbm_rxdma));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) * Turn on the rest of the bits in the enable register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) #if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) __raw_writeq(M_MAC_RXDMA_EN0 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) M_MAC_TXDMA_EN0, s->sbm_macenable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) #elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) __raw_writeq(M_MAC_RXDMA_EN0 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) M_MAC_TXDMA_EN0 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) M_MAC_RX_ENABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) M_MAC_TX_ENABLE, s->sbm_macenable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) #error invalid SiByte MAC configuration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) #ifdef CONFIG_SBMAC_COALESCE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0), s->sbm_imr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) __raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), s->sbm_imr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) * Enable receiving unicasts and broadcasts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) __raw_writeq(M_MAC_UCAST_EN | M_MAC_BCAST_EN, s->sbm_rxfilter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) * we're running now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) s->sbm_state = sbmac_state_on;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) * Program multicast addresses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) sbmac_setmulti(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) * If channel was in promiscuous mode before, turn that on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) if (s->sbm_devflags & IFF_PROMISC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) sbmac_promiscuous_mode(s,1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) /**********************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) * SBMAC_CHANNEL_STOP(s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) * Stop packet processing on this MAC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) * Input parameters:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) * s - sbmac structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) * nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) ********************************************************************* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) static void sbmac_channel_stop(struct sbmac_softc *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) /* don't do this if already stopped */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) if (s->sbm_state == sbmac_state_off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) /* don't accept any packets, disable all interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) __raw_writeq(0, s->sbm_rxfilter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) __raw_writeq(0, s->sbm_imr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) /* Turn off ticker */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) /* XXX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) /* turn off receiver and transmitter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) __raw_writeq(0, s->sbm_macenable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) /* We're stopped now. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) s->sbm_state = sbmac_state_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) * Stop DMA channels (rings should be ok now)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) sbdma_channel_stop(&(s->sbm_rxdma));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) sbdma_channel_stop(&(s->sbm_txdma));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) /* Empty the receive and transmit rings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) sbdma_emptyring(&(s->sbm_rxdma));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) sbdma_emptyring(&(s->sbm_txdma));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) /**********************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) * SBMAC_SET_CHANNEL_STATE(state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) * Set the channel's state ON or OFF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) * Input parameters:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) * state - new state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) * old state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) ********************************************************************* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) static enum sbmac_state sbmac_set_channel_state(struct sbmac_softc *sc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) enum sbmac_state state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) enum sbmac_state oldstate = sc->sbm_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) * If same as previous state, return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) if (state == oldstate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) return oldstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) * If new state is ON, turn channel on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) if (state == sbmac_state_on) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) sbmac_channel_start(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) sbmac_channel_stop(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) * Return previous state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) return oldstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) /**********************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) * SBMAC_PROMISCUOUS_MODE(sc,onoff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) * Turn on or off promiscuous mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) * Input parameters:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) * sc - softc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) * onoff - 1 to turn on, 0 to turn off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) * nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) ********************************************************************* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) static void sbmac_promiscuous_mode(struct sbmac_softc *sc,int onoff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) uint64_t reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) if (sc->sbm_state != sbmac_state_on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) if (onoff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) reg = __raw_readq(sc->sbm_rxfilter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) reg |= M_MAC_ALLPKT_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) __raw_writeq(reg, sc->sbm_rxfilter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) reg = __raw_readq(sc->sbm_rxfilter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) reg &= ~M_MAC_ALLPKT_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) __raw_writeq(reg, sc->sbm_rxfilter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) /**********************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) * SBMAC_SETIPHDR_OFFSET(sc,onoff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) * Set the iphdr offset as 15 assuming ethernet encapsulation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) * Input parameters:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) * sc - softc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) * nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) ********************************************************************* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) static void sbmac_set_iphdr_offset(struct sbmac_softc *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) uint64_t reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) /* Hard code the off set to 15 for now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) reg = __raw_readq(sc->sbm_rxfilter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) reg &= ~M_MAC_IPHDR_OFFSET | V_MAC_IPHDR_OFFSET(15);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) __raw_writeq(reg, sc->sbm_rxfilter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) /* BCM1250 pass1 didn't have hardware checksum. Everything
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) later does. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) if (soc_type == K_SYS_SOC_TYPE_BCM1250 && periph_rev < 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) sc->rx_hw_checksum = DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) sc->rx_hw_checksum = ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) /**********************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) * SBMAC_ADDR2REG(ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) * Convert six bytes into the 64-bit register value that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) * we typically write into the SBMAC's address/mcast registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) * Input parameters:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) * ptr - pointer to 6 bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) * register value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) ********************************************************************* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) static uint64_t sbmac_addr2reg(unsigned char *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) uint64_t reg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) ptr += 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) reg |= (uint64_t) *(--ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) reg <<= 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) reg |= (uint64_t) *(--ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) reg <<= 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) reg |= (uint64_t) *(--ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) reg <<= 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) reg |= (uint64_t) *(--ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) reg <<= 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) reg |= (uint64_t) *(--ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) reg <<= 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) reg |= (uint64_t) *(--ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) return reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) /**********************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) * SBMAC_SET_SPEED(s,speed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) * Configure LAN speed for the specified MAC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) * Warning: must be called when MAC is off!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) * Input parameters:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) * s - sbmac structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) * speed - speed to set MAC to (see enum sbmac_speed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) * 1 if successful
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) * 0 indicates invalid parameters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) ********************************************************************* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) static int sbmac_set_speed(struct sbmac_softc *s, enum sbmac_speed speed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) uint64_t cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) uint64_t framecfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) * Save new current values
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) s->sbm_speed = speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) if (s->sbm_state == sbmac_state_on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) return 0; /* save for next restart */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) * Read current register values
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) cfg = __raw_readq(s->sbm_maccfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) framecfg = __raw_readq(s->sbm_framecfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) * Mask out the stuff we want to change
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) cfg &= ~(M_MAC_BURST_EN | M_MAC_SPEED_SEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) framecfg &= ~(M_MAC_IFG_RX | M_MAC_IFG_TX | M_MAC_IFG_THRSH |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) M_MAC_SLOT_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) * Now add in the new bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) switch (speed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) case sbmac_speed_10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) framecfg |= V_MAC_IFG_RX_10 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) V_MAC_IFG_TX_10 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) K_MAC_IFG_THRSH_10 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) V_MAC_SLOT_SIZE_10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) cfg |= V_MAC_SPEED_SEL_10MBPS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) case sbmac_speed_100:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) framecfg |= V_MAC_IFG_RX_100 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) V_MAC_IFG_TX_100 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) V_MAC_IFG_THRSH_100 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) V_MAC_SLOT_SIZE_100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) cfg |= V_MAC_SPEED_SEL_100MBPS ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) case sbmac_speed_1000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) framecfg |= V_MAC_IFG_RX_1000 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) V_MAC_IFG_TX_1000 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) V_MAC_IFG_THRSH_1000 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) V_MAC_SLOT_SIZE_1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) cfg |= V_MAC_SPEED_SEL_1000MBPS | M_MAC_BURST_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) * Send the bits back to the hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) __raw_writeq(framecfg, s->sbm_framecfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) __raw_writeq(cfg, s->sbm_maccfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) /**********************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) * SBMAC_SET_DUPLEX(s,duplex,fc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) * Set Ethernet duplex and flow control options for this MAC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) * Warning: must be called when MAC is off!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) * Input parameters:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) * s - sbmac structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) * duplex - duplex setting (see enum sbmac_duplex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) * fc - flow control setting (see enum sbmac_fc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) * 1 if ok
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) * 0 if an invalid parameter combination was specified
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) ********************************************************************* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) static int sbmac_set_duplex(struct sbmac_softc *s, enum sbmac_duplex duplex,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) enum sbmac_fc fc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) uint64_t cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) * Save new current values
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) s->sbm_duplex = duplex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) s->sbm_fc = fc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) if (s->sbm_state == sbmac_state_on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) return 0; /* save for next restart */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) * Read current register values
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) cfg = __raw_readq(s->sbm_maccfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) * Mask off the stuff we're about to change
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) cfg &= ~(M_MAC_FC_SEL | M_MAC_FC_CMD | M_MAC_HDX_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) switch (duplex) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) case sbmac_duplex_half:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) switch (fc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) case sbmac_fc_disabled:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) case sbmac_fc_collision:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) case sbmac_fc_carrier:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENAB_FALSECARR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) case sbmac_fc_frame: /* not valid in half duplex */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) default: /* invalid selection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) case sbmac_duplex_full:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) switch (fc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) case sbmac_fc_disabled:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) cfg |= V_MAC_FC_CMD_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) case sbmac_fc_frame:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) cfg |= V_MAC_FC_CMD_ENABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) case sbmac_fc_collision: /* not valid in full duplex */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) case sbmac_fc_carrier: /* not valid in full duplex */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) * Send the bits back to the hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) __raw_writeq(cfg, s->sbm_maccfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) /**********************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) * SBMAC_INTR()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) * Interrupt handler for MAC interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) * Input parameters:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) * MAC structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) * nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) ********************************************************************* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) static irqreturn_t sbmac_intr(int irq,void *dev_instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) struct net_device *dev = (struct net_device *) dev_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) struct sbmac_softc *sc = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) uint64_t isr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) int handled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) * Read the ISR (this clears the bits in the real
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) * register, except for counter addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) isr = __raw_readq(sc->sbm_isr) & ~M_MAC_COUNTER_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) if (isr == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) return IRQ_RETVAL(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) handled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) * Transmits on channel 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) if (isr & (M_MAC_INT_CHANNEL << S_MAC_TX_CH0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) sbdma_tx_process(sc,&(sc->sbm_txdma), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) if (napi_schedule_prep(&sc->napi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) __raw_writeq(0, sc->sbm_imr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) __napi_schedule(&sc->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) /* Depend on the exit from poll to reenable intr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) /* may leave some packets behind */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) sbdma_rx_process(sc,&(sc->sbm_rxdma),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) SBMAC_MAX_RXDESCR * 2, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) return IRQ_RETVAL(handled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) /**********************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) * SBMAC_START_TX(skb,dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) * Start output on the specified interface. Basically, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) * queue as many buffers as we can until the ring fills up, or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) * we run off the end of the queue, whichever comes first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) * Input parameters:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) * nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) ********************************************************************* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) static netdev_tx_t sbmac_start_tx(struct sk_buff *skb, struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) struct sbmac_softc *sc = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) /* lock eth irq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) spin_lock_irqsave(&sc->sbm_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) * Put the buffer on the transmit ring. If we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) * don't have room, stop the queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) if (sbdma_add_txbuffer(&(sc->sbm_txdma),skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) /* XXX save skb that we could not send */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) spin_unlock_irqrestore(&sc->sbm_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) return NETDEV_TX_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) spin_unlock_irqrestore(&sc->sbm_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) /**********************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) * SBMAC_SETMULTI(sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) * Reprogram the multicast table into the hardware, given
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) * the list of multicasts associated with the interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) * structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) * Input parameters:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) * sc - softc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) * nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) ********************************************************************* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) static void sbmac_setmulti(struct sbmac_softc *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) uint64_t reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) void __iomem *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) struct netdev_hw_addr *ha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) struct net_device *dev = sc->sbm_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) * Clear out entire multicast table. We do this by nuking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) * the entire hash table and all the direct matches except
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) * the first one, which is used for our station address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) for (idx = 1; idx < MAC_ADDR_COUNT; idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) port = sc->sbm_base + R_MAC_ADDR_BASE+(idx*sizeof(uint64_t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) __raw_writeq(0, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) for (idx = 0; idx < MAC_HASH_COUNT; idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) port = sc->sbm_base + R_MAC_HASH_BASE+(idx*sizeof(uint64_t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) __raw_writeq(0, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) * Clear the filter to say we don't want any multicasts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) reg = __raw_readq(sc->sbm_rxfilter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) reg &= ~(M_MAC_MCAST_INV | M_MAC_MCAST_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) __raw_writeq(reg, sc->sbm_rxfilter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) if (dev->flags & IFF_ALLMULTI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) * Enable ALL multicasts. Do this by inverting the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) * multicast enable bit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) reg = __raw_readq(sc->sbm_rxfilter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) reg |= (M_MAC_MCAST_INV | M_MAC_MCAST_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) __raw_writeq(reg, sc->sbm_rxfilter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) * Progam new multicast entries. For now, only use the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) * perfect filter. In the future we'll need to use the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) * hash filter if the perfect filter overflows
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) /* XXX only using perfect filter for now, need to use hash
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) * XXX if the table overflows */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) idx = 1; /* skip station address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) netdev_for_each_mc_addr(ha, dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) if (idx == MAC_ADDR_COUNT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) reg = sbmac_addr2reg(ha->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) port = sc->sbm_base + R_MAC_ADDR_BASE+(idx * sizeof(uint64_t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) __raw_writeq(reg, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) * Enable the "accept multicast bits" if we programmed at least one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) * multicast.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) if (idx > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) reg = __raw_readq(sc->sbm_rxfilter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) reg |= M_MAC_MCAST_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) __raw_writeq(reg, sc->sbm_rxfilter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) static const struct net_device_ops sbmac_netdev_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) .ndo_open = sbmac_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) .ndo_stop = sbmac_close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) .ndo_start_xmit = sbmac_start_tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) .ndo_set_rx_mode = sbmac_set_rx_mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) .ndo_tx_timeout = sbmac_tx_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) .ndo_do_ioctl = sbmac_mii_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) .ndo_validate_addr = eth_validate_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) .ndo_set_mac_address = eth_mac_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) #ifdef CONFIG_NET_POLL_CONTROLLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) .ndo_poll_controller = sbmac_netpoll,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) /**********************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) * SBMAC_INIT(dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) * Attach routine - init hardware and hook ourselves into linux
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) * Input parameters:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) * dev - net_device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) * status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) ********************************************************************* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) static int sbmac_init(struct platform_device *pldev, long long base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) struct net_device *dev = platform_get_drvdata(pldev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) int idx = pldev->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) struct sbmac_softc *sc = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) unsigned char *eaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) uint64_t ea_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) sc->sbm_dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) sc->sbe_idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) eaddr = sc->sbm_hwaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) * Read the ethernet address. The firmware left this programmed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) * for us in the ethernet address register for each mac.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) ea_reg = __raw_readq(sc->sbm_base + R_MAC_ETHERNET_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) __raw_writeq(0, sc->sbm_base + R_MAC_ETHERNET_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) for (i = 0; i < 6; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) eaddr[i] = (uint8_t) (ea_reg & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) ea_reg >>= 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) for (i = 0; i < 6; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) dev->dev_addr[i] = eaddr[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) * Initialize context (get pointers to registers and stuff), then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) * allocate the memory for the descriptor tables.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) sbmac_initctx(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) * Set up Linux device callins
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) spin_lock_init(&(sc->sbm_lock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) dev->netdev_ops = &sbmac_netdev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) dev->watchdog_timeo = TX_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) dev->min_mtu = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) dev->max_mtu = ENET_PACKET_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) netif_napi_add(dev, &sc->napi, sbmac_poll, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) dev->irq = UNIT_INT(idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) /* This is needed for PASS2 for Rx H/W checksum feature */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) sbmac_set_iphdr_offset(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) sc->mii_bus = mdiobus_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) if (sc->mii_bus == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) goto uninit_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) sc->mii_bus->name = sbmac_mdio_string;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) snprintf(sc->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) pldev->name, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) sc->mii_bus->priv = sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) sc->mii_bus->read = sbmac_mii_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) sc->mii_bus->write = sbmac_mii_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) sc->mii_bus->parent = &pldev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) * Probe PHY address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) err = mdiobus_register(sc->mii_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) printk(KERN_ERR "%s: unable to register MDIO bus\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) goto free_mdio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) platform_set_drvdata(pldev, sc->mii_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) err = register_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) printk(KERN_ERR "%s.%d: unable to register netdev\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) sbmac_string, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) goto unreg_mdio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) pr_info("%s.%d: registered as %s\n", sbmac_string, idx, dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) if (sc->rx_hw_checksum == ENABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) pr_info("%s: enabling TCP rcv checksum\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) * Display Ethernet address (this is called during the config
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) * process so we need to finish off the config message that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) * was being displayed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) pr_info("%s: SiByte Ethernet at 0x%08Lx, address: %pM\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) dev->name, base, eaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) unreg_mdio:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) mdiobus_unregister(sc->mii_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) free_mdio:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) mdiobus_free(sc->mii_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) uninit_ctx:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) sbmac_uninitctx(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) static int sbmac_open(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) struct sbmac_softc *sc = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) if (debug > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) pr_debug("%s: sbmac_open() irq %d.\n", dev->name, dev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) * map/route interrupt (clear status first, in case something
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) * weird is pending; we haven't initialized the mac registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) * yet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) __raw_readq(sc->sbm_isr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) err = request_irq(dev->irq, sbmac_intr, IRQF_SHARED, dev->name, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) printk(KERN_ERR "%s: unable to get IRQ %d\n", dev->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) dev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) sc->sbm_speed = sbmac_speed_none;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) sc->sbm_duplex = sbmac_duplex_none;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) sc->sbm_fc = sbmac_fc_none;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) sc->sbm_pause = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) sc->sbm_link = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) * Attach to the PHY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) err = sbmac_mii_probe(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) goto out_unregister;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) * Turn on the channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) sbmac_set_channel_state(sc,sbmac_state_on);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) netif_start_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) sbmac_set_rx_mode(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) phy_start(sc->phy_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) napi_enable(&sc->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) out_unregister:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) free_irq(dev->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) static int sbmac_mii_probe(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) struct sbmac_softc *sc = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) struct phy_device *phy_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) phy_dev = phy_find_first(sc->mii_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) if (!phy_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) printk(KERN_ERR "%s: no PHY found\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) phy_dev = phy_connect(dev, dev_name(&phy_dev->mdio.dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) &sbmac_mii_poll, PHY_INTERFACE_MODE_GMII);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) if (IS_ERR(phy_dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) printk(KERN_ERR "%s: could not attach to PHY\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) return PTR_ERR(phy_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) /* Remove any features not supported by the controller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) phy_set_max_speed(phy_dev, SPEED_1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) phy_support_asym_pause(phy_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) phy_attached_info(phy_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) sc->phy_dev = phy_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) static void sbmac_mii_poll(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) struct sbmac_softc *sc = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) struct phy_device *phy_dev = sc->phy_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) enum sbmac_fc fc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) int link_chg, speed_chg, duplex_chg, pause_chg, fc_chg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) link_chg = (sc->sbm_link != phy_dev->link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) speed_chg = (sc->sbm_speed != phy_dev->speed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) duplex_chg = (sc->sbm_duplex != phy_dev->duplex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) pause_chg = (sc->sbm_pause != phy_dev->pause);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) if (!link_chg && !speed_chg && !duplex_chg && !pause_chg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) return; /* Hmmm... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) if (!phy_dev->link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) if (link_chg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) sc->sbm_link = phy_dev->link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) sc->sbm_speed = sbmac_speed_none;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) sc->sbm_duplex = sbmac_duplex_none;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) sc->sbm_fc = sbmac_fc_disabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) sc->sbm_pause = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) pr_info("%s: link unavailable\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) if (phy_dev->duplex == DUPLEX_FULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) if (phy_dev->pause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) fc = sbmac_fc_frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) fc = sbmac_fc_disabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) fc = sbmac_fc_collision;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) fc_chg = (sc->sbm_fc != fc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) pr_info("%s: link available: %dbase-%cD\n", dev->name, phy_dev->speed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) phy_dev->duplex == DUPLEX_FULL ? 'F' : 'H');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) spin_lock_irqsave(&sc->sbm_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) sc->sbm_speed = phy_dev->speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) sc->sbm_duplex = phy_dev->duplex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) sc->sbm_fc = fc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) sc->sbm_pause = phy_dev->pause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) sc->sbm_link = phy_dev->link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) if ((speed_chg || duplex_chg || fc_chg) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) sc->sbm_state != sbmac_state_off) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) * something changed, restart the channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) if (debug > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) pr_debug("%s: restarting channel "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) "because PHY state changed\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) sbmac_channel_stop(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) sbmac_channel_start(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) spin_unlock_irqrestore(&sc->sbm_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) static void sbmac_tx_timeout (struct net_device *dev, unsigned int txqueue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) struct sbmac_softc *sc = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) spin_lock_irqsave(&sc->sbm_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) netif_trans_update(dev); /* prevent tx timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) dev->stats.tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) spin_unlock_irqrestore(&sc->sbm_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) printk (KERN_WARNING "%s: Transmit timed out\n",dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) static void sbmac_set_rx_mode(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) struct sbmac_softc *sc = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) spin_lock_irqsave(&sc->sbm_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) if ((dev->flags ^ sc->sbm_devflags) & IFF_PROMISC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) * Promiscuous changed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) if (dev->flags & IFF_PROMISC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) sbmac_promiscuous_mode(sc,1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) sbmac_promiscuous_mode(sc,0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) spin_unlock_irqrestore(&sc->sbm_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) * Program the multicasts. Do this every time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) sbmac_setmulti(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) struct sbmac_softc *sc = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) if (!netif_running(dev) || !sc->phy_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) return phy_mii_ioctl(sc->phy_dev, rq, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) static int sbmac_close(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) struct sbmac_softc *sc = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) napi_disable(&sc->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) phy_stop(sc->phy_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) sbmac_set_channel_state(sc, sbmac_state_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) if (debug > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) pr_debug("%s: Shutting down ethercard\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) phy_disconnect(sc->phy_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) sc->phy_dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) free_irq(dev->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) sbdma_emptyring(&(sc->sbm_txdma));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) sbdma_emptyring(&(sc->sbm_rxdma));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) static int sbmac_poll(struct napi_struct *napi, int budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) struct sbmac_softc *sc = container_of(napi, struct sbmac_softc, napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) int work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) work_done = sbdma_rx_process(sc, &(sc->sbm_rxdma), budget, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) sbdma_tx_process(sc, &(sc->sbm_txdma), 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) if (work_done < budget) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) napi_complete_done(napi, work_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) #ifdef CONFIG_SBMAC_COALESCE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) sc->sbm_imr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) __raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), sc->sbm_imr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) return work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) static int sbmac_probe(struct platform_device *pldev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) struct sbmac_softc *sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) void __iomem *sbm_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) u64 sbmac_orig_hwaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) res = platform_get_resource(pldev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) BUG_ON(!res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) sbm_base = ioremap(res->start, resource_size(res));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) if (!sbm_base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) printk(KERN_ERR "%s: unable to map device registers\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) dev_name(&pldev->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) goto out_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) * The R_MAC_ETHERNET_ADDR register will be set to some nonzero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) * value for us by the firmware if we're going to use this MAC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) * If we find a zero, skip this MAC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) sbmac_orig_hwaddr = __raw_readq(sbm_base + R_MAC_ETHERNET_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) pr_debug("%s: %sconfiguring MAC at 0x%08Lx\n", dev_name(&pldev->dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) sbmac_orig_hwaddr ? "" : "not ", (long long)res->start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) if (sbmac_orig_hwaddr == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) goto out_unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) * Okay, cool. Initialize this MAC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) dev = alloc_etherdev(sizeof(struct sbmac_softc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) if (!dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) goto out_unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) platform_set_drvdata(pldev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) SET_NETDEV_DEV(dev, &pldev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) sc = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) sc->sbm_base = sbm_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) err = sbmac_init(pldev, res->start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) goto out_kfree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) out_kfree:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) free_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) __raw_writeq(sbmac_orig_hwaddr, sbm_base + R_MAC_ETHERNET_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) out_unmap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) iounmap(sbm_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) out_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) static int sbmac_remove(struct platform_device *pldev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) struct net_device *dev = platform_get_drvdata(pldev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) struct sbmac_softc *sc = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) unregister_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) sbmac_uninitctx(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) mdiobus_unregister(sc->mii_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) mdiobus_free(sc->mii_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) iounmap(sc->sbm_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) free_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) static struct platform_driver sbmac_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) .probe = sbmac_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) .remove = sbmac_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) .name = sbmac_string,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) module_platform_driver(sbmac_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) MODULE_LICENSE("GPL");