// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/net/ethernet/ec_bhf.c
 *
 * Copyright (C) 2014 Darek Marcinkiewicz <reksio@newterm.pl>
 */

/* This is a driver for the EtherCAT master module present on the CCAT FPGA.
 * The CCAT can be found on Beckhoff CX50xx industrial PCs.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/init.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/stat.h>

#define TIMER_INTERVAL_NSEC	20000

#define INFO_BLOCK_SIZE		0x10
#define INFO_BLOCK_TYPE		0x0
#define INFO_BLOCK_REV		0x2
#define INFO_BLOCK_BLK_CNT	0x4
#define INFO_BLOCK_TX_CHAN	0x4
#define INFO_BLOCK_RX_CHAN	0x5
#define INFO_BLOCK_OFFSET	0x8

#define EC_MII_OFFSET		0x4
#define EC_FIFO_OFFSET		0x8
#define EC_MAC_OFFSET		0xc

#define MAC_FRAME_ERR_CNT	0x0
#define MAC_RX_ERR_CNT		0x1
#define MAC_CRC_ERR_CNT		0x2
#define MAC_LNK_LST_ERR_CNT	0x3
#define MAC_TX_FRAME_CNT	0x10
#define MAC_RX_FRAME_CNT	0x14
#define MAC_TX_FIFO_LVL		0x20
#define MAC_DROPPED_FRMS	0x28
#define MAC_CONNECTED_CCAT_FLAG	0x78

#define MII_MAC_ADDR		0x8
#define MII_MAC_FILT_FLAG	0xe
#define MII_LINK_STATUS		0xf

#define FIFO_TX_REG		0x0
#define FIFO_TX_RESET		0x8
#define FIFO_RX_REG		0x10
#define FIFO_RX_ADDR_VALID	(1u << 31)
#define FIFO_RX_RESET		0x18

#define DMA_CHAN_OFFSET		0x1000
#define DMA_CHAN_SIZE		0x8

#define DMA_WINDOW_SIZE_MASK	0xfffffffc

#define ETHERCAT_MASTER_ID	0x14

static const struct pci_device_id ids[] = {
	{ PCI_DEVICE(0x15ec, 0x5000), },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ids);

struct rx_header {
#define RXHDR_NEXT_ADDR_MASK	0xffffffu
#define RXHDR_NEXT_VALID	(1u << 31)
	__le32 next;
#define RXHDR_NEXT_RECV_FLAG	0x1
	__le32 recv;
#define RXHDR_LEN_MASK		0xfffu
	__le16 len;
	__le16 port;
	__le32 reserved;
	u8 timestamp[8];
} __packed;

#define PKT_PAYLOAD_SIZE	0x7e8
struct rx_desc {
	struct rx_header header;
	u8 data[PKT_PAYLOAD_SIZE];
} __packed;

struct tx_header {
	__le16 len;
#define TX_HDR_PORT_0		0x1
#define TX_HDR_PORT_1		0x2
	u8 port;
	u8 ts_enable;
#define TX_HDR_SENT		0x1
	__le32 sent;
	u8 timestamp[8];
} __packed;

struct tx_desc {
	struct tx_header header;
	u8 data[PKT_PAYLOAD_SIZE];
} __packed;

#define FIFO_SIZE		64

static long polling_frequency = TIMER_INTERVAL_NSEC;

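/* DMA buffer bookkeeping: 'alloc' is the raw coherent allocation (twice the
 * usable window, see ec_bhf_alloc_dma_mem()), while 'buf'/'buf_phys' describe
 * the suitably aligned region inside it that is actually handed to hardware.
 */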
struct bhf_dma {
	u8 *buf;
	size_t len;
	dma_addr_t buf_phys;

	u8 *alloc;
	size_t alloc_len;
	dma_addr_t alloc_phys;
};

struct ec_bhf_priv {
	struct net_device *net_dev;
	struct pci_dev *dev;

	void __iomem *io;
	void __iomem *dma_io;

	struct hrtimer hrtimer;

	int tx_dma_chan;
	int rx_dma_chan;
	void __iomem *ec_io;
	void __iomem *fifo_io;
	void __iomem *mii_io;
	void __iomem *mac_io;

	struct bhf_dma rx_buf;
	struct rx_desc *rx_descs;
	int rx_dnext;
	int rx_dcount;

	struct bhf_dma tx_buf;
	struct tx_desc *tx_descs;
	int tx_dcount;
	int tx_dnext;

	u64 stat_rx_bytes;
	u64 stat_tx_bytes;
};

#define PRIV_TO_DEV(priv) (&(priv)->dev->dev)

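/* Clear the MAC error and frame counters, reset both FIFOs and zero the TX
 * FIFO level, bringing the EtherCAT block to a known state. Called when the
 * interface is opened and when it is stopped.
 */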
static void ec_bhf_reset(struct ec_bhf_priv *priv)
{
	iowrite8(0, priv->mac_io + MAC_FRAME_ERR_CNT);
	iowrite8(0, priv->mac_io + MAC_RX_ERR_CNT);
	iowrite8(0, priv->mac_io + MAC_CRC_ERR_CNT);
	iowrite8(0, priv->mac_io + MAC_LNK_LST_ERR_CNT);
	iowrite32(0, priv->mac_io + MAC_TX_FRAME_CNT);
	iowrite32(0, priv->mac_io + MAC_RX_FRAME_CNT);
	iowrite8(0, priv->mac_io + MAC_DROPPED_FRMS);

	iowrite8(0, priv->fifo_io + FIFO_TX_RESET);
	iowrite8(0, priv->fifo_io + FIFO_RX_RESET);

	iowrite8(0, priv->mac_io + MAC_TX_FIFO_LVL);
}

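/* Kick off transmission of one descriptor. The word written to FIFO_TX_REG
 * appears to pack the frame length (rounded up to 8 bytes) into the upper
 * bits and the descriptor's byte offset within the TX DMA window into the
 * lower bits; the exact hardware encoding is not documented here.
 */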
static void ec_bhf_send_packet(struct ec_bhf_priv *priv, struct tx_desc *desc)
{
	u32 len = le16_to_cpu(desc->header.len) + sizeof(desc->header);
	u32 addr = (u8 *)desc - priv->tx_buf.buf;

	iowrite32((ALIGN(len, 8) << 24) | addr, priv->fifo_io + FIFO_TX_REG);
}

static int ec_bhf_desc_sent(struct tx_desc *desc)
{
	return le32_to_cpu(desc->header.sent) & TX_HDR_SENT;
}

static void ec_bhf_process_tx(struct ec_bhf_priv *priv)
{
	if (unlikely(netif_queue_stopped(priv->net_dev))) {
		/* Make sure we see the latest value of tx_dnext written by
		 * the xmit path (pairs with the smp_wmb() in
		 * ec_bhf_start_xmit()).
		 */
		smp_rmb();

		if (ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext]))
			netif_wake_queue(priv->net_dev);
	}
}

static int ec_bhf_pkt_received(struct rx_desc *desc)
{
	return le32_to_cpu(desc->header.recv) & RXHDR_NEXT_RECV_FLAG;
}

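/* Hand a descriptor back to the hardware RX FIFO, identified by its byte
 * offset within the RX DMA window, with the address-valid bit set.
 */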
static void ec_bhf_add_rx_desc(struct ec_bhf_priv *priv, struct rx_desc *desc)
{
	iowrite32(FIFO_RX_ADDR_VALID | ((u8 *)(desc) - priv->rx_buf.buf),
		  priv->fifo_io + FIFO_RX_REG);
}

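/* Drain received frames in ring order. The hardware length field covers the
 * rx_header and (presumably) a trailing 4-byte FCS, both of which are
 * stripped before the payload is copied into an skb and passed up the stack.
 */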
static void ec_bhf_process_rx(struct ec_bhf_priv *priv)
{
	struct rx_desc *desc = &priv->rx_descs[priv->rx_dnext];

	while (ec_bhf_pkt_received(desc)) {
		int pkt_size = (le16_to_cpu(desc->header.len) &
				RXHDR_LEN_MASK) - sizeof(struct rx_header) - 4;
		u8 *data = desc->data;
		struct sk_buff *skb;

		skb = netdev_alloc_skb_ip_align(priv->net_dev, pkt_size);
		if (skb) {
			skb_put_data(skb, data, pkt_size);
			skb->protocol = eth_type_trans(skb, priv->net_dev);
			priv->stat_rx_bytes += pkt_size;

			netif_rx(skb);
		} else {
			dev_err_ratelimited(PRIV_TO_DEV(priv),
					    "Couldn't allocate an sk_buff for a packet of size %d\n",
					    pkt_size);
		}

		desc->header.recv = 0;

		ec_bhf_add_rx_desc(priv, desc);

		priv->rx_dnext = (priv->rx_dnext + 1) % priv->rx_dcount;
		desc = &priv->rx_descs[priv->rx_dnext];
	}
}

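/* The driver does not use interrupts; this hrtimer callback polls the
 * descriptor rings every polling_frequency nanoseconds (20 us by default),
 * processing RX completions and waking the TX queue when space frees up.
 */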
static enum hrtimer_restart ec_bhf_timer_fun(struct hrtimer *timer)
{
	struct ec_bhf_priv *priv = container_of(timer, struct ec_bhf_priv,
						hrtimer);
	ec_bhf_process_rx(priv);
	ec_bhf_process_tx(priv);

	if (!netif_running(priv->net_dev))
		return HRTIMER_NORESTART;

	hrtimer_forward_now(timer, polling_frequency);
	return HRTIMER_RESTART;
}

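/* BAR 0 starts with a table of 16-byte info blocks. Scan it for the EtherCAT
 * master block (type 0x14), then read the TX/RX DMA channel numbers and the
 * offset of the function's register area, from which the MII, FIFO and MAC
 * register offsets are in turn derived.
 */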
static int ec_bhf_setup_offsets(struct ec_bhf_priv *priv)
{
	struct device *dev = PRIV_TO_DEV(priv);
	unsigned int block_count, i;
	void __iomem *ec_info;

	block_count = ioread8(priv->io + INFO_BLOCK_BLK_CNT);
	for (i = 0; i < block_count; i++) {
		u16 type = ioread16(priv->io + i * INFO_BLOCK_SIZE +
				    INFO_BLOCK_TYPE);
		if (type == ETHERCAT_MASTER_ID)
			break;
	}
	if (i == block_count) {
		dev_err(dev, "EtherCAT master with DMA block not found\n");
		return -ENODEV;
	}

	ec_info = priv->io + i * INFO_BLOCK_SIZE;

	priv->tx_dma_chan = ioread8(ec_info + INFO_BLOCK_TX_CHAN);
	priv->rx_dma_chan = ioread8(ec_info + INFO_BLOCK_RX_CHAN);

	priv->ec_io = priv->io + ioread32(ec_info + INFO_BLOCK_OFFSET);
	priv->mii_io = priv->ec_io + ioread32(priv->ec_io + EC_MII_OFFSET);
	priv->fifo_io = priv->ec_io + ioread32(priv->ec_io + EC_FIFO_OFFSET);
	priv->mac_io = priv->ec_io + ioread32(priv->ec_io + EC_MAC_OFFSET);

	return 0;
}

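/* Copy the skb into the next free TX descriptor and kick the FIFO. If the
 * descriptor after that has not been sent yet the ring is considered full
 * and the queue is stopped; the smp_wmb() below pairs with the smp_rmb() in
 * ec_bhf_process_tx(), which later re-checks the flag and wakes the queue.
 */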
static netdev_tx_t ec_bhf_start_xmit(struct sk_buff *skb,
				     struct net_device *net_dev)
{
	struct ec_bhf_priv *priv = netdev_priv(net_dev);
	struct tx_desc *desc;
	unsigned int len;

	desc = &priv->tx_descs[priv->tx_dnext];

	skb_copy_and_csum_dev(skb, desc->data);
	len = skb->len;

	memset(&desc->header, 0, sizeof(desc->header));
	desc->header.len = cpu_to_le16(len);
	desc->header.port = TX_HDR_PORT_0;

	ec_bhf_send_packet(priv, desc);

	priv->tx_dnext = (priv->tx_dnext + 1) % priv->tx_dcount;

	if (!ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext])) {
		/* Make sure that the update to tx_dnext is seen by the timer
		 * routine (pairs with the smp_rmb() in ec_bhf_process_tx()).
		 */
		smp_wmb();

		netif_stop_queue(net_dev);
	}

	priv->stat_tx_bytes += len;

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

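/* Discover the size and alignment of a DMA channel's address window and
 * place a suitably aligned buffer inside a double-sized coherent allocation.
 * For example (illustrative numbers, not taken from hardware docs): if
 * writing 0xffffffff and reading back yields a mask of 0xffffff00, the
 * window is 256 bytes, so 512 bytes are allocated and
 * buf_phys = (alloc_phys + 256) & 0xffffff00 is guaranteed to be a
 * 256-byte-aligned region that fits entirely within the allocation.
 */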
static int ec_bhf_alloc_dma_mem(struct ec_bhf_priv *priv,
				struct bhf_dma *buf,
				int channel,
				int size)
{
	int offset = channel * DMA_CHAN_SIZE + DMA_CHAN_OFFSET;
	struct device *dev = PRIV_TO_DEV(priv);
	u32 mask;

	iowrite32(0xffffffff, priv->dma_io + offset);

	mask = ioread32(priv->dma_io + offset);
	mask &= DMA_WINDOW_SIZE_MASK;

	/* We need a chunk of memory that is aligned to the mask we just read
	 * and no larger than the DMA window, i.e. at most (~mask + 1) bytes.
	 * Allocating twice that length guarantees that such an aligned
	 * region exists somewhere inside the allocation.
	 */
	buf->len = min_t(int, ~mask + 1, size);
	buf->alloc_len = 2 * buf->len;

	buf->alloc = dma_alloc_coherent(dev, buf->alloc_len, &buf->alloc_phys,
					GFP_KERNEL);
	if (!buf->alloc) {
		dev_err(dev, "Failed to allocate buffer\n");
		return -ENOMEM;
	}

	buf->buf_phys = (buf->alloc_phys + buf->len) & mask;
	buf->buf = buf->alloc + (buf->buf_phys - buf->alloc_phys);

	iowrite32(0, priv->dma_io + offset + 4);
	iowrite32(buf->buf_phys, priv->dma_io + offset);

	return 0;
}

static void ec_bhf_setup_tx_descs(struct ec_bhf_priv *priv)
{
	int i;

	priv->tx_dcount = priv->tx_buf.len / sizeof(struct tx_desc);
	priv->tx_descs = (struct tx_desc *)priv->tx_buf.buf;
	priv->tx_dnext = 0;

	for (i = 0; i < priv->tx_dcount; i++)
		priv->tx_descs[i].header.sent = cpu_to_le32(TX_HDR_SENT);
}

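/* Carve the RX DMA window into descriptors and chain them: each header's
 * 'next' field holds the byte offset of the following descriptor (0 for the
 * last one, wrapping back to the start), with the valid bit set, and every
 * descriptor is handed to the hardware RX FIFO.
 */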
static void ec_bhf_setup_rx_descs(struct ec_bhf_priv *priv)
{
	int i;

	priv->rx_dcount = priv->rx_buf.len / sizeof(struct rx_desc);
	priv->rx_descs = (struct rx_desc *)priv->rx_buf.buf;
	priv->rx_dnext = 0;

	for (i = 0; i < priv->rx_dcount; i++) {
		struct rx_desc *desc = &priv->rx_descs[i];
		u32 next;

		if (i != priv->rx_dcount - 1)
			next = (u8 *)(desc + 1) - priv->rx_buf.buf;
		else
			next = 0;
		next |= RXHDR_NEXT_VALID;
		desc->header.next = cpu_to_le32(next);
		desc->header.recv = 0;
		ec_bhf_add_rx_desc(priv, desc);
	}
}

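/* Bring the interface up: reset the EtherCAT block, allocate and initialise
 * the RX and TX descriptor rings (FIFO_SIZE entries each), clear the MAC
 * filter flag (presumably disabling address filtering), and start the
 * polling hrtimer.
 */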
static int ec_bhf_open(struct net_device *net_dev)
{
	struct ec_bhf_priv *priv = netdev_priv(net_dev);
	struct device *dev = PRIV_TO_DEV(priv);
	int err = 0;

	ec_bhf_reset(priv);

	err = ec_bhf_alloc_dma_mem(priv, &priv->rx_buf, priv->rx_dma_chan,
				   FIFO_SIZE * sizeof(struct rx_desc));
	if (err) {
		dev_err(dev, "Failed to allocate rx buffer\n");
		goto out;
	}
	ec_bhf_setup_rx_descs(priv);

	err = ec_bhf_alloc_dma_mem(priv, &priv->tx_buf, priv->tx_dma_chan,
				   FIFO_SIZE * sizeof(struct tx_desc));
	if (err) {
		dev_err(dev, "Failed to allocate tx buffer\n");
		goto error_rx_free;
	}
	iowrite8(0, priv->mii_io + MII_MAC_FILT_FLAG);
	ec_bhf_setup_tx_descs(priv);

	netif_start_queue(net_dev);

	hrtimer_init(&priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	priv->hrtimer.function = ec_bhf_timer_fun;
	hrtimer_start(&priv->hrtimer, polling_frequency, HRTIMER_MODE_REL);

	return 0;

error_rx_free:
	dma_free_coherent(dev, priv->rx_buf.alloc_len, priv->rx_buf.alloc,
			  priv->rx_buf.alloc_phys);
out:
	return err;
}

static int ec_bhf_stop(struct net_device *net_dev)
{
	struct ec_bhf_priv *priv = netdev_priv(net_dev);
	struct device *dev = PRIV_TO_DEV(priv);

	hrtimer_cancel(&priv->hrtimer);

	ec_bhf_reset(priv);

	netif_tx_disable(net_dev);

	dma_free_coherent(dev, priv->tx_buf.alloc_len,
			  priv->tx_buf.alloc, priv->tx_buf.alloc_phys);
	dma_free_coherent(dev, priv->rx_buf.alloc_len,
			  priv->rx_buf.alloc, priv->rx_buf.alloc_phys);

	return 0;
}

static void
ec_bhf_get_stats(struct net_device *net_dev,
		 struct rtnl_link_stats64 *stats)
{
	struct ec_bhf_priv *priv = netdev_priv(net_dev);

	stats->rx_errors = ioread8(priv->mac_io + MAC_RX_ERR_CNT) +
			   ioread8(priv->mac_io + MAC_CRC_ERR_CNT) +
			   ioread8(priv->mac_io + MAC_FRAME_ERR_CNT);
	stats->rx_packets = ioread32(priv->mac_io + MAC_RX_FRAME_CNT);
	stats->tx_packets = ioread32(priv->mac_io + MAC_TX_FRAME_CNT);
	stats->rx_dropped = ioread8(priv->mac_io + MAC_DROPPED_FRMS);

	stats->tx_bytes = priv->stat_tx_bytes;
	stats->rx_bytes = priv->stat_rx_bytes;
}

static const struct net_device_ops ec_bhf_netdev_ops = {
	.ndo_start_xmit		= ec_bhf_start_xmit,
	.ndo_open		= ec_bhf_open,
	.ndo_stop		= ec_bhf_stop,
	.ndo_get_stats64	= ec_bhf_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr
};

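/* Probe: enable the PCI device, restrict DMA to 32-bit addresses, map BAR 0
 * (info blocks and EtherCAT registers) and BAR 2 (DMA window configuration),
 * locate the EtherCAT master info block, read the MAC address from the MII
 * register area and register the net_device.
 */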
static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct net_device *net_dev;
	struct ec_bhf_priv *priv;
	void __iomem *dma_io;
	void __iomem *io;
	int err = 0;

	err = pci_enable_device(dev);
	if (err)
		return err;

	pci_set_master(dev);

	err = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&dev->dev,
			"Required dma mask not supported, failed to initialize device\n");
		err = -EIO;
		goto err_disable_dev;
	}

	err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&dev->dev,
			"Required dma mask not supported, failed to initialize device\n");
		goto err_disable_dev;
	}

	err = pci_request_regions(dev, "ec_bhf");
	if (err) {
		dev_err(&dev->dev, "Failed to request pci memory regions\n");
		goto err_disable_dev;
	}

	io = pci_iomap(dev, 0, 0);
	if (!io) {
		dev_err(&dev->dev, "Failed to map pci card memory bar 0\n");
		err = -EIO;
		goto err_release_regions;
	}

	dma_io = pci_iomap(dev, 2, 0);
	if (!dma_io) {
		dev_err(&dev->dev, "Failed to map pci card memory bar 2\n");
		err = -EIO;
		goto err_unmap;
	}

	net_dev = alloc_etherdev(sizeof(struct ec_bhf_priv));
	if (!net_dev) {
		err = -ENOMEM;
		goto err_unmap_dma_io;
	}

	pci_set_drvdata(dev, net_dev);
	SET_NETDEV_DEV(net_dev, &dev->dev);

	net_dev->features = 0;
	net_dev->flags |= IFF_NOARP;

	net_dev->netdev_ops = &ec_bhf_netdev_ops;

	priv = netdev_priv(net_dev);
	priv->net_dev = net_dev;
	priv->io = io;
	priv->dma_io = dma_io;
	priv->dev = dev;

	err = ec_bhf_setup_offsets(priv);
	if (err < 0)
		goto err_free_net_dev;

	memcpy_fromio(net_dev->dev_addr, priv->mii_io + MII_MAC_ADDR, ETH_ALEN);

	err = register_netdev(net_dev);
	if (err < 0)
		goto err_free_net_dev;

	return 0;

err_free_net_dev:
	free_netdev(net_dev);
err_unmap_dma_io:
	pci_iounmap(dev, dma_io);
err_unmap:
	pci_iounmap(dev, io);
err_release_regions:
	pci_release_regions(dev);
err_disable_dev:
	pci_clear_master(dev);
	pci_disable_device(dev);

	return err;
}

static void ec_bhf_remove(struct pci_dev *dev)
{
	struct net_device *net_dev = pci_get_drvdata(dev);
	struct ec_bhf_priv *priv = netdev_priv(net_dev);

	unregister_netdev(net_dev);

	pci_iounmap(dev, priv->dma_io);
	pci_iounmap(dev, priv->io);

	free_netdev(net_dev);

	pci_release_regions(dev);
	pci_clear_master(dev);
	pci_disable_device(dev);
}

static struct pci_driver pci_driver = {
	.name		= "ec_bhf",
	.id_table	= ids,
	.probe		= ec_bhf_probe,
	.remove		= ec_bhf_remove,
};
module_pci_driver(pci_driver);

module_param(polling_frequency, long, 0444);
MODULE_PARM_DESC(polling_frequency, "Polling interval in nanoseconds");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dariusz Marcinkiewicz <reksio@newterm.pl>");