^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * sonic.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * (C) 2005 Finn Thain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Converted to DMA API, added zero-copy buffer handling, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * (from the mac68k project) introduced dhd's support for 16-bit cards.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * (C) 1996,1998 by Thomas Bogendoerfer (tsbogend@alpha.franken.de)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) * This driver is based on work from Andreas Busse, but most of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) * the code is rewritten.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) * (C) 1995 by Andreas Busse (andy@waldorf-gmbh.de)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) * Core code included by system sonic drivers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) * And... partially rewritten again by David Huggins-Daines in order
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) * to cope with screwed up Macintosh NICs that may or may not use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) * 16-bit DMA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) * (C) 1999 David Huggins-Daines <dhd@debian.org>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) * Sources: Olivetti M700-10 Risc Personal Computer hardware handbook,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) * National Semiconductors data sheet for the DP83932B Sonic Ethernet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) * controller, and the files "8390.c" and "skeleton.c" in this directory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) * Additional sources: Nat Semi data sheet for the DP83932C and Nat Semi
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) * Application Note AN-746, the files "lance.c" and "ibmlana.c". See also
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) * the NetBSD file "sys/arch/mac68k/dev/if_sn.c".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) static unsigned int version_printed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) static int sonic_debug = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) module_param(sonic_debug, int, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) MODULE_PARM_DESC(sonic_debug, "debug message level");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) static void sonic_msg_init(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) struct sonic_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) lp->msg_enable = netif_msg_init(sonic_debug, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) if (version_printed++ == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) netif_dbg(lp, drv, dev, "%s", version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) static int sonic_alloc_descriptors(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) struct sonic_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) /* Allocate a chunk of memory for the descriptors. Note that this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) * must not cross a 64K boundary. It is smaller than one page which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) * means that page alignment is a sufficient condition.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) lp->descriptors =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) dma_alloc_coherent(lp->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) SIZEOF_SONIC_DESC *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) SONIC_BUS_SCALE(lp->dma_bitmode),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) &lp->descriptors_laddr, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) if (!lp->descriptors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) lp->cda = lp->descriptors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) lp->tda = lp->cda + SIZEOF_SONIC_CDA *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) SONIC_BUS_SCALE(lp->dma_bitmode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) lp->rda = lp->tda + SIZEOF_SONIC_TD * SONIC_NUM_TDS *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) SONIC_BUS_SCALE(lp->dma_bitmode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) lp->rra = lp->rda + SIZEOF_SONIC_RD * SONIC_NUM_RDS *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) SONIC_BUS_SCALE(lp->dma_bitmode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) lp->cda_laddr = lp->descriptors_laddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) lp->tda_laddr = lp->cda_laddr + SIZEOF_SONIC_CDA *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) SONIC_BUS_SCALE(lp->dma_bitmode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) lp->rda_laddr = lp->tda_laddr + SIZEOF_SONIC_TD * SONIC_NUM_TDS *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) SONIC_BUS_SCALE(lp->dma_bitmode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) lp->rra_laddr = lp->rda_laddr + SIZEOF_SONIC_RD * SONIC_NUM_RDS *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) SONIC_BUS_SCALE(lp->dma_bitmode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) * Open/initialize the SONIC controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) * This routine should set everything up anew at each open, even
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) * registers that "should" only need to be set once at boot, so that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) * there is a non-reboot way to recover if something goes wrong.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) static int sonic_open(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) struct sonic_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) netif_dbg(lp, ifup, dev, "%s: initializing sonic driver\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) spin_lock_init(&lp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) for (i = 0; i < SONIC_NUM_RRS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) struct sk_buff *skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) if (skb == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) while(i > 0) { /* free any that were allocated successfully */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) i--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) dev_kfree_skb(lp->rx_skb[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) lp->rx_skb[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) printk(KERN_ERR "%s: couldn't allocate receive buffers\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) /* align IP header unless DMA requires otherwise */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) skb_reserve(skb, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) lp->rx_skb[i] = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) for (i = 0; i < SONIC_NUM_RRS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) dma_addr_t laddr = dma_map_single(lp->device, skb_put(lp->rx_skb[i], SONIC_RBSIZE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) SONIC_RBSIZE, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) if (dma_mapping_error(lp->device, laddr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) while(i > 0) { /* free any that were mapped successfully */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) i--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) lp->rx_laddr[i] = (dma_addr_t)0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) for (i = 0; i < SONIC_NUM_RRS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) dev_kfree_skb(lp->rx_skb[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) lp->rx_skb[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) printk(KERN_ERR "%s: couldn't map rx DMA buffers\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) lp->rx_laddr[i] = laddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) * Initialize the SONIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) sonic_init(dev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) netif_start_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) netif_dbg(lp, ifup, dev, "%s: Initialization done\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) /* Wait for the SONIC to become idle. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) static void sonic_quiesce(struct net_device *dev, u16 mask, bool may_sleep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) struct sonic_local * __maybe_unused lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) u16 bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) for (i = 0; i < 1000; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) bits = SONIC_READ(SONIC_CMD) & mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) if (!bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) if (!may_sleep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) udelay(20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) usleep_range(100, 200);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) WARN_ONCE(1, "command deadline expired! 0x%04x\n", bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) * Close the SONIC device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) static int sonic_close(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) struct sonic_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) netif_dbg(lp, ifdown, dev, "%s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) * stop the SONIC, disable interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) sonic_quiesce(dev, SONIC_CR_ALL, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) SONIC_WRITE(SONIC_IMR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) SONIC_WRITE(SONIC_ISR, 0x7fff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) /* unmap and free skbs that haven't been transmitted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) for (i = 0; i < SONIC_NUM_TDS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) if(lp->tx_laddr[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) dma_unmap_single(lp->device, lp->tx_laddr[i], lp->tx_len[i], DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) lp->tx_laddr[i] = (dma_addr_t)0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) if(lp->tx_skb[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) dev_kfree_skb(lp->tx_skb[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) lp->tx_skb[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) /* unmap and free the receive buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) for (i = 0; i < SONIC_NUM_RRS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) if(lp->rx_laddr[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) lp->rx_laddr[i] = (dma_addr_t)0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) if(lp->rx_skb[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) dev_kfree_skb(lp->rx_skb[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) lp->rx_skb[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) static void sonic_tx_timeout(struct net_device *dev, unsigned int txqueue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) struct sonic_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) * put the Sonic into software-reset mode and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) * disable all interrupts before releasing DMA buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) sonic_quiesce(dev, SONIC_CR_ALL, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) SONIC_WRITE(SONIC_IMR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) SONIC_WRITE(SONIC_ISR, 0x7fff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) /* We could resend the original skbs. Easier to re-initialise. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) for (i = 0; i < SONIC_NUM_TDS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) if(lp->tx_laddr[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) dma_unmap_single(lp->device, lp->tx_laddr[i], lp->tx_len[i], DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) lp->tx_laddr[i] = (dma_addr_t)0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) if(lp->tx_skb[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) dev_kfree_skb(lp->tx_skb[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) lp->tx_skb[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) /* Try to restart the adaptor. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) sonic_init(dev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) lp->stats.tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) netif_trans_update(dev); /* prevent tx timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) * transmit packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) * Appends new TD during transmission thus avoiding any TX interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) * until we run out of TDs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) * This routine interacts closely with the ISR in that it may,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) * set tx_skb[i]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) * reset the status flags of the new TD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) * set and reset EOL flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) * stop the tx queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) * The ISR interacts with this routine in various ways. It may,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) * reset tx_skb[i]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) * test the EOL and status flags of the TDs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) * wake the tx queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) * Concurrently with all of this, the SONIC is potentially writing to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) * the status flags of the TDs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) struct sonic_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) dma_addr_t laddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) int length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) int entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) netif_dbg(lp, tx_queued, dev, "%s: skb=%p\n", __func__, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) length = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) if (length < ETH_ZLEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) if (skb_padto(skb, ETH_ZLEN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) length = ETH_ZLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) * Map the packet data into the logical DMA address space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) if (!laddr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) pr_err_ratelimited("%s: failed to map tx DMA buffer.\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) entry = (lp->eol_tx + 1) & SONIC_TDS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1); /* single fragment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length); /* length of packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_L, laddr & 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_H, laddr >> 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) sonic_tda_put(dev, entry, SONIC_TD_FRAG_SIZE, length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) sonic_tda_put(dev, entry, SONIC_TD_LINK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) sonic_tda_put(dev, lp->eol_tx, SONIC_TD_LINK, ~SONIC_EOL &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) sonic_tda_get(dev, lp->eol_tx, SONIC_TD_LINK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) netif_dbg(lp, tx_queued, dev, "%s: issuing Tx command\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) lp->tx_len[entry] = length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) lp->tx_laddr[entry] = laddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) lp->tx_skb[entry] = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) lp->eol_tx = entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) entry = (entry + 1) & SONIC_TDS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) if (lp->tx_skb[entry]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) /* The ring is full, the ISR has yet to process the next TD. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) netif_dbg(lp, tx_queued, dev, "%s: stopping queue\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) /* after this packet, wait for ISR to free up some TDAs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) * The typical workload of the driver:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) * Handle the network interface interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) static irqreturn_t sonic_interrupt(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) struct net_device *dev = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) struct sonic_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) /* The lock has two purposes. Firstly, it synchronizes sonic_interrupt()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) * with sonic_send_packet() so that the two functions can share state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) * Secondly, it makes sonic_interrupt() re-entrant, as that is required
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) * by macsonic which must use two IRQs with different priority levels.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) if (!status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) SONIC_WRITE(SONIC_ISR, status); /* clear the interrupt(s) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) if (status & SONIC_INT_PKTRX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) netif_dbg(lp, intr, dev, "%s: packet rx\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) sonic_rx(dev); /* got packet(s) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) if (status & SONIC_INT_TXDN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) int entry = lp->cur_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) int td_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) int freed_some = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) /* The state of a Transmit Descriptor may be inferred
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) * from { tx_skb[entry], td_status } as follows.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) * { clear, clear } => the TD has never been used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) * { set, clear } => the TD was handed to SONIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) * { set, set } => the TD was handed back
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) * { clear, set } => the TD is available for re-use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) netif_dbg(lp, intr, dev, "%s: tx done\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) while (lp->tx_skb[entry] != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) if ((td_status = sonic_tda_get(dev, entry, SONIC_TD_STATUS)) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) if (td_status & SONIC_TCR_PTX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) lp->stats.tx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) lp->stats.tx_bytes += sonic_tda_get(dev, entry, SONIC_TD_PKTSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) if (td_status & (SONIC_TCR_EXD |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) SONIC_TCR_EXC | SONIC_TCR_BCM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) lp->stats.tx_aborted_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) if (td_status &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) (SONIC_TCR_NCRS | SONIC_TCR_CRLS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) lp->stats.tx_carrier_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) if (td_status & SONIC_TCR_OWC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) lp->stats.tx_window_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) if (td_status & SONIC_TCR_FU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) lp->stats.tx_fifo_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) /* We must free the original skb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) dev_consume_skb_irq(lp->tx_skb[entry]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) lp->tx_skb[entry] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) /* and unmap DMA buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) dma_unmap_single(lp->device, lp->tx_laddr[entry], lp->tx_len[entry], DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) lp->tx_laddr[entry] = (dma_addr_t)0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) freed_some = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) if (sonic_tda_get(dev, entry, SONIC_TD_LINK) & SONIC_EOL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) entry = (entry + 1) & SONIC_TDS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) entry = (entry + 1) & SONIC_TDS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) if (freed_some || lp->tx_skb[entry] == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) netif_wake_queue(dev); /* The ring is no longer full */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) lp->cur_tx = entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) * check error conditions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) if (status & SONIC_INT_RFO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) netif_dbg(lp, rx_err, dev, "%s: rx fifo overrun\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) if (status & SONIC_INT_RDE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) netif_dbg(lp, rx_err, dev, "%s: rx descriptors exhausted\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) if (status & SONIC_INT_RBAE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) netif_dbg(lp, rx_err, dev, "%s: rx buffer area exceeded\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) /* counter overruns; all counters are 16bit wide */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) if (status & SONIC_INT_FAE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) lp->stats.rx_frame_errors += 65536;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) if (status & SONIC_INT_CRC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) lp->stats.rx_crc_errors += 65536;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) if (status & SONIC_INT_MP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) lp->stats.rx_missed_errors += 65536;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) /* transmit error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) if (status & SONIC_INT_TXER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) u16 tcr = SONIC_READ(SONIC_TCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) netif_dbg(lp, tx_err, dev, "%s: TXER intr, TCR %04x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) __func__, tcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) if (tcr & (SONIC_TCR_EXD | SONIC_TCR_EXC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) SONIC_TCR_FU | SONIC_TCR_BCM)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) /* Aborted transmission. Try again. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) /* bus retry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) if (status & SONIC_INT_BR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) printk(KERN_ERR "%s: Bus retry occurred! Device interrupt disabled.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) /* ... to help debug DMA problems causing endless interrupts. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) /* Bounce the eth interface to turn on the interrupt again. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) SONIC_WRITE(SONIC_IMR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) } while (status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) /* Return the array index corresponding to a given Receive Buffer pointer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) static int index_from_addr(struct sonic_local *lp, dma_addr_t addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) unsigned int last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) unsigned int i = last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) i = (i + 1) & SONIC_RRS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) if (addr == lp->rx_laddr[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) } while (i != last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) /* Allocate and map a new skb to be used as a receive buffer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) static bool sonic_alloc_rb(struct net_device *dev, struct sonic_local *lp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) struct sk_buff **new_skb, dma_addr_t *new_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) *new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) if (!*new_skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) skb_reserve(*new_skb, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) *new_addr = dma_map_single(lp->device, skb_put(*new_skb, SONIC_RBSIZE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) SONIC_RBSIZE, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) if (!*new_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) dev_kfree_skb(*new_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) *new_skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) /* Place a new receive resource in the Receive Resource Area and update RWP. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) static void sonic_update_rra(struct net_device *dev, struct sonic_local *lp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) dma_addr_t old_addr, dma_addr_t new_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) unsigned int entry = sonic_rr_entry(dev, SONIC_READ(SONIC_RWP));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) unsigned int end = sonic_rr_entry(dev, SONIC_READ(SONIC_RRP));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) u32 buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) /* The resources in the range [RRP, RWP) belong to the SONIC. This loop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) * scans the other resources in the RRA, those in the range [RWP, RRP).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) buf = (sonic_rra_get(dev, entry, SONIC_RR_BUFADR_H) << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) sonic_rra_get(dev, entry, SONIC_RR_BUFADR_L);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) if (buf == old_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) entry = (entry + 1) & SONIC_RRS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) } while (entry != end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) WARN_ONCE(buf != old_addr, "failed to find resource!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, new_addr >> 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, new_addr & 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) entry = (entry + 1) & SONIC_RRS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, entry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) * We have a good packet(s), pass it/them up the network stack.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) static void sonic_rx(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) struct sonic_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) int entry = lp->cur_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) int prev_entry = lp->eol_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) bool rbe = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) while (sonic_rda_get(dev, entry, SONIC_RD_IN_USE) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) u16 status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) /* If the RD has LPKT set, the chip has finished with the RB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) if ((status & SONIC_RCR_PRX) && (status & SONIC_RCR_LPKT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) struct sk_buff *new_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) dma_addr_t new_laddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) u32 addr = (sonic_rda_get(dev, entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) SONIC_RD_PKTPTR_H) << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) sonic_rda_get(dev, entry, SONIC_RD_PKTPTR_L);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) int i = index_from_addr(lp, addr, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) if (i < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) WARN_ONCE(1, "failed to find buffer!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) if (sonic_alloc_rb(dev, lp, &new_skb, &new_laddr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) struct sk_buff *used_skb = lp->rx_skb[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) int pkt_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) /* Pass the used buffer up the stack */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) dma_unmap_single(lp->device, addr, SONIC_RBSIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) pkt_len = sonic_rda_get(dev, entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) SONIC_RD_PKTLEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) skb_trim(used_skb, pkt_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) used_skb->protocol = eth_type_trans(used_skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) netif_rx(used_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) lp->stats.rx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) lp->stats.rx_bytes += pkt_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) lp->rx_skb[i] = new_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) lp->rx_laddr[i] = new_laddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) /* Failed to obtain a new buffer so re-use it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) new_laddr = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) lp->stats.rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) /* If RBE is already asserted when RWP advances then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) * it's safe to clear RBE after processing this packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) rbe = rbe || SONIC_READ(SONIC_ISR) & SONIC_INT_RBE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) sonic_update_rra(dev, lp, addr, new_laddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) * give back the descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) sonic_rda_put(dev, entry, SONIC_RD_STATUS, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) sonic_rda_put(dev, entry, SONIC_RD_IN_USE, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) prev_entry = entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) entry = (entry + 1) & SONIC_RDS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) lp->cur_rx = entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) if (prev_entry != lp->eol_rx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) /* Advance the EOL flag to put descriptors back into service */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) sonic_rda_put(dev, prev_entry, SONIC_RD_LINK, SONIC_EOL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) sonic_rda_get(dev, prev_entry, SONIC_RD_LINK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK, ~SONIC_EOL &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) lp->eol_rx = prev_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) if (rbe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) * Get the current statistics.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) * This may be called with the device open or closed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) static struct net_device_stats *sonic_get_stats(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) struct sonic_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) /* read the tally counter from the SONIC and reset them */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) lp->stats.rx_crc_errors += SONIC_READ(SONIC_CRCT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) SONIC_WRITE(SONIC_CRCT, 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) lp->stats.rx_frame_errors += SONIC_READ(SONIC_FAET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) SONIC_WRITE(SONIC_FAET, 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) lp->stats.rx_missed_errors += SONIC_READ(SONIC_MPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) SONIC_WRITE(SONIC_MPT, 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) return &lp->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) * Set or clear the multicast filter for this adaptor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) static void sonic_multicast_list(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) struct sonic_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) unsigned int rcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) struct netdev_hw_addr *ha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) unsigned char *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) rcr = SONIC_READ(SONIC_RCR) & ~(SONIC_RCR_PRO | SONIC_RCR_AMC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) rcr |= SONIC_RCR_BRD; /* accept broadcast packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) rcr |= SONIC_RCR_PRO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) if ((dev->flags & IFF_ALLMULTI) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) (netdev_mc_count(dev) > 15)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) rcr |= SONIC_RCR_AMC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) netif_dbg(lp, ifup, dev, "%s: mc_count %d\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) netdev_mc_count(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) sonic_set_cam_enable(dev, 1); /* always enable our own address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) i = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) netdev_for_each_mc_addr(ha, dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) addr = ha->addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) sonic_cda_put(dev, i, SONIC_CD_CAP0, addr[1] << 8 | addr[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) sonic_cda_put(dev, i, SONIC_CD_CAP1, addr[3] << 8 | addr[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) sonic_cda_put(dev, i, SONIC_CD_CAP2, addr[5] << 8 | addr[4]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) sonic_set_cam_enable(dev, sonic_get_cam_enable(dev) | (1 << i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) SONIC_WRITE(SONIC_CDC, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) /* LCAM and TXP commands can't be used simultaneously */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) sonic_quiesce(dev, SONIC_CR_TXP, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) sonic_quiesce(dev, SONIC_CR_LCAM, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) netif_dbg(lp, ifup, dev, "%s: setting RCR=%x\n", __func__, rcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) SONIC_WRITE(SONIC_RCR, rcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) * Initialize the SONIC ethernet controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) static int sonic_init(struct net_device *dev, bool may_sleep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) struct sonic_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) * put the Sonic into software-reset mode and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) * disable all interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) SONIC_WRITE(SONIC_IMR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) SONIC_WRITE(SONIC_ISR, 0x7fff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) /* While in reset mode, clear CAM Enable register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) SONIC_WRITE(SONIC_CE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) * clear software reset flag, disable receiver, clear and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) * enable interrupts, then completely initialize the SONIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) SONIC_WRITE(SONIC_CMD, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS | SONIC_CR_STP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) sonic_quiesce(dev, SONIC_CR_ALL, may_sleep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) * initialize the receive resource area
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) netif_dbg(lp, ifup, dev, "%s: initialize receive resource area\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) for (i = 0; i < SONIC_NUM_RRS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) u16 bufadr_l = (unsigned long)lp->rx_laddr[i] & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) u16 bufadr_h = (unsigned long)lp->rx_laddr[i] >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) sonic_rra_put(dev, i, SONIC_RR_BUFADR_L, bufadr_l);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) sonic_rra_put(dev, i, SONIC_RR_BUFADR_H, bufadr_h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) sonic_rra_put(dev, i, SONIC_RR_BUFSIZE_L, SONIC_RBSIZE >> 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) sonic_rra_put(dev, i, SONIC_RR_BUFSIZE_H, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) /* initialize all RRA registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) SONIC_WRITE(SONIC_RSA, sonic_rr_addr(dev, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) SONIC_WRITE(SONIC_REA, sonic_rr_addr(dev, SONIC_NUM_RRS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) SONIC_WRITE(SONIC_RRP, sonic_rr_addr(dev, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, SONIC_NUM_RRS - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) SONIC_WRITE(SONIC_URRA, lp->rra_laddr >> 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE >> 1) - (lp->dma_bitmode ? 2 : 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) /* load the resource pointers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) netif_dbg(lp, ifup, dev, "%s: issuing RRRA command\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) sonic_quiesce(dev, SONIC_CR_RRRA, may_sleep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) * Initialize the receive descriptors so that they
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) * become a circular linked list, ie. let the last
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) * descriptor point to the first again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) netif_dbg(lp, ifup, dev, "%s: initialize receive descriptors\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) for (i=0; i<SONIC_NUM_RDS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) sonic_rda_put(dev, i, SONIC_RD_STATUS, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) sonic_rda_put(dev, i, SONIC_RD_PKTLEN, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) sonic_rda_put(dev, i, SONIC_RD_PKTPTR_L, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) sonic_rda_put(dev, i, SONIC_RD_PKTPTR_H, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) sonic_rda_put(dev, i, SONIC_RD_SEQNO, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) sonic_rda_put(dev, i, SONIC_RD_IN_USE, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) sonic_rda_put(dev, i, SONIC_RD_LINK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) lp->rda_laddr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) ((i+1) * SIZEOF_SONIC_RD * SONIC_BUS_SCALE(lp->dma_bitmode)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) /* fix last descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) sonic_rda_put(dev, SONIC_NUM_RDS - 1, SONIC_RD_LINK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) (lp->rda_laddr & 0xffff) | SONIC_EOL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) lp->eol_rx = SONIC_NUM_RDS - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) lp->cur_rx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) SONIC_WRITE(SONIC_URDA, lp->rda_laddr >> 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) SONIC_WRITE(SONIC_CRDA, lp->rda_laddr & 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) * initialize transmit descriptors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) netif_dbg(lp, ifup, dev, "%s: initialize transmit descriptors\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) for (i = 0; i < SONIC_NUM_TDS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) sonic_tda_put(dev, i, SONIC_TD_STATUS, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) sonic_tda_put(dev, i, SONIC_TD_CONFIG, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) sonic_tda_put(dev, i, SONIC_TD_PKTSIZE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) sonic_tda_put(dev, i, SONIC_TD_FRAG_COUNT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) sonic_tda_put(dev, i, SONIC_TD_LINK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) (lp->tda_laddr & 0xffff) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) (i + 1) * SIZEOF_SONIC_TD * SONIC_BUS_SCALE(lp->dma_bitmode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) lp->tx_skb[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) /* fix last descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) sonic_tda_put(dev, SONIC_NUM_TDS - 1, SONIC_TD_LINK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) (lp->tda_laddr & 0xffff));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) SONIC_WRITE(SONIC_UTDA, lp->tda_laddr >> 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) SONIC_WRITE(SONIC_CTDA, lp->tda_laddr & 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) lp->cur_tx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) lp->eol_tx = SONIC_NUM_TDS - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) * put our own address to CAM desc[0]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) sonic_cda_put(dev, 0, SONIC_CD_CAP0, dev->dev_addr[1] << 8 | dev->dev_addr[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) sonic_cda_put(dev, 0, SONIC_CD_CAP1, dev->dev_addr[3] << 8 | dev->dev_addr[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) sonic_cda_put(dev, 0, SONIC_CD_CAP2, dev->dev_addr[5] << 8 | dev->dev_addr[4]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) sonic_set_cam_enable(dev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) for (i = 0; i < 16; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) sonic_cda_put(dev, i, SONIC_CD_ENTRY_POINTER, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) * initialize CAM registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) SONIC_WRITE(SONIC_CDC, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) * load the CAM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) sonic_quiesce(dev, SONIC_CR_LCAM, may_sleep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) * enable receiver, disable loopback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) * and enable all interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) SONIC_WRITE(SONIC_RCR, SONIC_RCR_DEFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) SONIC_WRITE(SONIC_TCR, SONIC_TCR_DEFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) SONIC_WRITE(SONIC_ISR, 0x7fff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) SONIC_WRITE(SONIC_IMR, SONIC_IMR_DEFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) netif_dbg(lp, ifup, dev, "%s: new status=%x\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) SONIC_READ(SONIC_CMD));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) MODULE_LICENSE("GPL");