// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * rionet - Ethernet driver over RapidIO messaging services
 *
 * Copyright 2005 MontaVista Software, Inc.
 * Matt Porter <mporter@kernel.crashing.org>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/slab.h>
#include <linux/rio_ids.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/reboot.h>

#define DRV_NAME	"rionet"
#define DRV_VERSION	"0.3"
#define DRV_AUTHOR	"Matt Porter <mporter@kernel.crashing.org>"
#define DRV_DESC	"Ethernet over RapidIO"

MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);
MODULE_LICENSE("GPL");

#define RIONET_DEFAULT_MSGLEVEL \
			(NETIF_MSG_DRV | \
			 NETIF_MSG_LINK | \
			 NETIF_MSG_RX_ERR | \
			 NETIF_MSG_TX_ERR)

#define RIONET_DOORBELL_JOIN	0x1000
#define RIONET_DOORBELL_LEAVE	0x1001

#define RIONET_MAILBOX		0

#define RIONET_TX_RING_SIZE	CONFIG_RIONET_TX_SIZE
#define RIONET_RX_RING_SIZE	CONFIG_RIONET_RX_SIZE
#define RIONET_MAX_NETS		8
#define RIONET_MSG_SIZE		RIO_MAX_MSG_SIZE
#define RIONET_MAX_MTU		(RIONET_MSG_SIZE - ETH_HLEN)

struct rionet_private {
	struct rio_mport *mport;
	struct sk_buff *rx_skb[RIONET_RX_RING_SIZE];
	struct sk_buff *tx_skb[RIONET_TX_RING_SIZE];
	int rx_slot;
	int tx_slot;
	int tx_cnt;
	int ack_slot;
	spinlock_t lock;
	spinlock_t tx_lock;
	u32 msg_enable;
	bool open;
};

struct rionet_peer {
	struct list_head node;
	struct rio_dev *rdev;
	struct resource *res;
};

struct rionet_net {
	struct net_device *ndev;
	struct list_head peers;
	spinlock_t lock;	/* net info access lock */
	struct rio_dev **active;
	int nact;		/* number of active peers */
};

static struct rionet_net nets[RIONET_MAX_NETS];

#define is_rionet_capable(src_ops, dst_ops)			\
			((src_ops & RIO_SRC_OPS_DATA_MSG) &&	\
			 (dst_ops & RIO_DST_OPS_DATA_MSG) &&	\
			 (src_ops & RIO_SRC_OPS_DOORBELL) &&	\
			 (dst_ops & RIO_DST_OPS_DOORBELL))
#define dev_rionet_capable(dev) \
	is_rionet_capable(dev->src_ops, dev->dst_ops)

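/*
 * rionet interfaces use locally assigned MAC addresses of the form
 * 00:01:00:01:dd:dd, where dd:dd is the node's 16-bit RapidIO destination
 * ID (see rionet_setup_netdev()).  These helpers recognize that prefix and
 * recover the destID from a destination MAC address.
 */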
#define RIONET_MAC_MATCH(x)	(!memcmp((x), "\00\01\00\01", 4))
#define RIONET_GET_DESTID(x)	((*((u8 *)x + 4) << 8) | *((u8 *)x + 5))

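/*
 * Drain completed inbound messages: starting at rx_slot, hand each received
 * buffer to the network stack and return the index of the first slot that
 * was not processed, so the caller can repost buffers via rionet_rx_fill().
 */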
static int rionet_rx_clean(struct net_device *ndev)
{
	int i;
	int error = 0;
	struct rionet_private *rnet = netdev_priv(ndev);
	void *data;

	i = rnet->rx_slot;

	do {
		if (!rnet->rx_skb[i])
			continue;

		if (!(data = rio_get_inb_message(rnet->mport, RIONET_MAILBOX)))
			break;

		rnet->rx_skb[i]->data = data;
		skb_put(rnet->rx_skb[i], RIO_MAX_MSG_SIZE);
		rnet->rx_skb[i]->protocol =
		    eth_type_trans(rnet->rx_skb[i], ndev);
		error = netif_rx(rnet->rx_skb[i]);

		if (error == NET_RX_DROP) {
			ndev->stats.rx_dropped++;
		} else {
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += RIO_MAX_MSG_SIZE;
		}

	} while ((i = (i + 1) % RIONET_RX_RING_SIZE) != rnet->rx_slot);

	return i;
}

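/*
 * Refill the inbound ring: allocate fresh skbs for the slots from rx_slot
 * up to (but not including) @end, post their data buffers to the inbound
 * mailbox, and advance rx_slot (stopping early if an allocation fails).
 */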
static void rionet_rx_fill(struct net_device *ndev, int end)
{
	int i;
	struct rionet_private *rnet = netdev_priv(ndev);

	i = rnet->rx_slot;
	do {
		rnet->rx_skb[i] = dev_alloc_skb(RIO_MAX_MSG_SIZE);

		if (!rnet->rx_skb[i])
			break;

		rio_add_inb_buffer(rnet->mport, RIONET_MAILBOX,
				   rnet->rx_skb[i]->data);
	} while ((i = (i + 1) % RIONET_RX_RING_SIZE) != end);

	rnet->rx_slot = i;
}

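/*
 * Queue one skb on the outbound mailbox toward @rdev and account for it in
 * the TX ring, stopping the queue when the ring fills up.  Called with
 * rnet->tx_lock held.
 */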
static int rionet_queue_tx_msg(struct sk_buff *skb, struct net_device *ndev,
			       struct rio_dev *rdev)
{
	struct rionet_private *rnet = netdev_priv(ndev);

	rio_add_outb_message(rnet->mport, rdev, 0, skb->data, skb->len);
	rnet->tx_skb[rnet->tx_slot] = skb;

	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += skb->len;

	if (++rnet->tx_cnt == RIONET_TX_RING_SIZE)
		netif_stop_queue(ndev);

	++rnet->tx_slot;
	rnet->tx_slot &= (RIONET_TX_RING_SIZE - 1);

	if (netif_msg_tx_queued(rnet))
		printk(KERN_INFO "%s: queued skb len %8.8x\n", DRV_NAME,
		       skb->len);

	return 0;
}

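/*
 * Transmit path.  Multicast/broadcast frames are replicated to every
 * currently active peer; unicast frames go to the peer whose destID is
 * encoded in the destination MAC address.
 */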
static netdev_tx_t rionet_start_xmit(struct sk_buff *skb,
				     struct net_device *ndev)
{
	int i;
	struct rionet_private *rnet = netdev_priv(ndev);
	struct ethhdr *eth = (struct ethhdr *)skb->data;
	u16 destid;
	unsigned long flags;
	int add_num = 1;

	spin_lock_irqsave(&rnet->tx_lock, flags);

	if (is_multicast_ether_addr(eth->h_dest))
		add_num = nets[rnet->mport->id].nact;

	if ((rnet->tx_cnt + add_num) > RIONET_TX_RING_SIZE) {
		netif_stop_queue(ndev);
		spin_unlock_irqrestore(&rnet->tx_lock, flags);
		printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n",
		       ndev->name);
		return NETDEV_TX_BUSY;
	}

	if (is_multicast_ether_addr(eth->h_dest)) {
		int count = 0;

		for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(rnet->mport->sys_size);
				i++)
			if (nets[rnet->mport->id].active[i]) {
				rionet_queue_tx_msg(skb, ndev,
					nets[rnet->mport->id].active[i]);
				if (count)
					refcount_inc(&skb->users);
				count++;
			}
	} else if (RIONET_MAC_MATCH(eth->h_dest)) {
		destid = RIONET_GET_DESTID(eth->h_dest);
		if (nets[rnet->mport->id].active[destid])
			rionet_queue_tx_msg(skb, ndev,
					    nets[rnet->mport->id].active[destid]);
		else {
			/*
			 * If the target device was removed from the list of
			 * active peers but we still have TX packets targeting
			 * it just report sending a packet to the target
			 * (without actual packet transfer).
			 */
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += skb->len;
			dev_kfree_skb_any(skb);
		}
	}

	spin_unlock_irqrestore(&rnet->tx_lock, flags);

	return NETDEV_TX_OK;
}

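/*
 * Inbound doorbell handler.  A JOIN doorbell marks the sending peer active
 * and, if it was not active already, is answered with a JOIN of our own;
 * a LEAVE doorbell removes the peer from the active table.  Anything else
 * is only logged.
 */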
static void rionet_dbell_event(struct rio_mport *mport, void *dev_id, u16 sid, u16 tid,
			       u16 info)
{
	struct net_device *ndev = dev_id;
	struct rionet_private *rnet = netdev_priv(ndev);
	struct rionet_peer *peer;
	unsigned char netid = rnet->mport->id;

	if (netif_msg_intr(rnet))
		printk(KERN_INFO "%s: doorbell sid %4.4x tid %4.4x info %4.4x\n",
		       DRV_NAME, sid, tid, info);
	if (info == RIONET_DOORBELL_JOIN) {
		if (!nets[netid].active[sid]) {
			spin_lock(&nets[netid].lock);
			list_for_each_entry(peer, &nets[netid].peers, node) {
				if (peer->rdev->destid == sid) {
					nets[netid].active[sid] = peer->rdev;
					nets[netid].nact++;
				}
			}
			spin_unlock(&nets[netid].lock);

			rio_mport_send_doorbell(mport, sid,
						RIONET_DOORBELL_JOIN);
		}
	} else if (info == RIONET_DOORBELL_LEAVE) {
		spin_lock(&nets[netid].lock);
		if (nets[netid].active[sid]) {
			nets[netid].active[sid] = NULL;
			nets[netid].nact--;
		}
		spin_unlock(&nets[netid].lock);
	} else {
		if (netif_msg_intr(rnet))
			printk(KERN_WARNING "%s: unhandled doorbell\n",
			       DRV_NAME);
	}
}

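/*
 * Inbound message callback: pull everything that has arrived off the
 * mailbox and repost receive buffers for the slots that were consumed.
 */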
static void rionet_inb_msg_event(struct rio_mport *mport, void *dev_id, int mbox, int slot)
{
	int n;
	struct net_device *ndev = dev_id;
	struct rionet_private *rnet = netdev_priv(ndev);

	if (netif_msg_intr(rnet))
		printk(KERN_INFO "%s: inbound message event, mbox %d slot %d\n",
		       DRV_NAME, mbox, slot);

	spin_lock(&rnet->lock);
	if ((n = rionet_rx_clean(ndev)) != rnet->rx_slot)
		rionet_rx_fill(ndev, n);
	spin_unlock(&rnet->lock);
}

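/*
 * Outbound message callback: free the skbs whose transmission has been
 * acknowledged (everything up to @slot) and restart the queue once the
 * TX ring has room again.
 */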
static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbox, int slot)
{
	struct net_device *ndev = dev_id;
	struct rionet_private *rnet = netdev_priv(ndev);

	spin_lock(&rnet->tx_lock);

	if (netif_msg_intr(rnet))
		printk(KERN_INFO
		       "%s: outbound message event, mbox %d slot %d\n",
		       DRV_NAME, mbox, slot);

	while (rnet->tx_cnt && (rnet->ack_slot != slot)) {
		/* dma unmap single */
		dev_kfree_skb_irq(rnet->tx_skb[rnet->ack_slot]);
		rnet->tx_skb[rnet->ack_slot] = NULL;
		++rnet->ack_slot;
		rnet->ack_slot &= (RIONET_TX_RING_SIZE - 1);
		rnet->tx_cnt--;
	}

	if (rnet->tx_cnt < RIONET_TX_RING_SIZE)
		netif_wake_queue(ndev);

	spin_unlock(&rnet->tx_lock);
}

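/*
 * ndo_open: claim the JOIN/LEAVE doorbell range and both mailboxes, prime
 * the receive ring, then send a JOIN doorbell to every known peer so they
 * mark this node active.
 */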
static int rionet_open(struct net_device *ndev)
{
	int i, rc = 0;
	struct rionet_peer *peer;
	struct rionet_private *rnet = netdev_priv(ndev);
	unsigned char netid = rnet->mport->id;
	unsigned long flags;

	if (netif_msg_ifup(rnet))
		printk(KERN_INFO "%s: open\n", DRV_NAME);

	if ((rc = rio_request_inb_dbell(rnet->mport,
					(void *)ndev,
					RIONET_DOORBELL_JOIN,
					RIONET_DOORBELL_LEAVE,
					rionet_dbell_event)) < 0)
		goto out;

	if ((rc = rio_request_inb_mbox(rnet->mport,
				       (void *)ndev,
				       RIONET_MAILBOX,
				       RIONET_RX_RING_SIZE,
				       rionet_inb_msg_event)) < 0)
		goto out;

	if ((rc = rio_request_outb_mbox(rnet->mport,
					(void *)ndev,
					RIONET_MAILBOX,
					RIONET_TX_RING_SIZE,
					rionet_outb_msg_event)) < 0)
		goto out;

	/* Initialize inbound message ring */
	for (i = 0; i < RIONET_RX_RING_SIZE; i++)
		rnet->rx_skb[i] = NULL;
	rnet->rx_slot = 0;
	rionet_rx_fill(ndev, 0);

	rnet->tx_slot = 0;
	rnet->tx_cnt = 0;
	rnet->ack_slot = 0;

	netif_carrier_on(ndev);
	netif_start_queue(ndev);

	spin_lock_irqsave(&nets[netid].lock, flags);
	list_for_each_entry(peer, &nets[netid].peers, node) {
		/* Send a join message */
		rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
	}
	spin_unlock_irqrestore(&nets[netid].lock, flags);
	rnet->open = true;

out:
	return rc;
}

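/*
 * ndo_stop: send a LEAVE doorbell to every active peer, free the receive
 * buffers, release the peers' outbound doorbell ranges and return the
 * inbound doorbell range and both mailboxes to the mport.
 */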
static int rionet_close(struct net_device *ndev)
{
	struct rionet_private *rnet = netdev_priv(ndev);
	struct rionet_peer *peer;
	unsigned char netid = rnet->mport->id;
	unsigned long flags;
	int i;

	if (netif_msg_ifdown(rnet))
		printk(KERN_INFO "%s: close %s\n", DRV_NAME, ndev->name);

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);
	rnet->open = false;

	for (i = 0; i < RIONET_RX_RING_SIZE; i++)
		kfree_skb(rnet->rx_skb[i]);

	spin_lock_irqsave(&nets[netid].lock, flags);
	list_for_each_entry(peer, &nets[netid].peers, node) {
		if (nets[netid].active[peer->rdev->destid]) {
			rio_send_doorbell(peer->rdev, RIONET_DOORBELL_LEAVE);
			nets[netid].active[peer->rdev->destid] = NULL;
		}
		if (peer->res)
			rio_release_outb_dbell(peer->rdev, peer->res);
	}
	spin_unlock_irqrestore(&nets[netid].lock, flags);

	rio_release_inb_dbell(rnet->mport, RIONET_DOORBELL_JOIN,
			      RIONET_DOORBELL_LEAVE);
	rio_release_inb_mbox(rnet->mport, RIONET_MAILBOX);
	rio_release_outb_mbox(rnet->mport, RIONET_MAILBOX);

	return 0;
}

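/*
 * subsys_interface remove callback: a RapidIO device is going away, so take
 * it off the peer list, clear its active-table entry (sending a LEAVE
 * doorbell if it is still reachable) and free the peer entry.
 */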
static void rionet_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	struct rio_dev *rdev = to_rio_dev(dev);
	unsigned char netid = rdev->net->hport->id;
	struct rionet_peer *peer;
	int state, found = 0;
	unsigned long flags;

	if (!dev_rionet_capable(rdev))
		return;

	spin_lock_irqsave(&nets[netid].lock, flags);
	list_for_each_entry(peer, &nets[netid].peers, node) {
		if (peer->rdev == rdev) {
			list_del(&peer->node);
			if (nets[netid].active[rdev->destid]) {
				state = atomic_read(&rdev->state);
				if (state != RIO_DEVICE_GONE &&
				    state != RIO_DEVICE_INITIALIZING) {
					rio_send_doorbell(rdev,
							RIONET_DOORBELL_LEAVE);
				}
				nets[netid].active[rdev->destid] = NULL;
				nets[netid].nact--;
			}
			found = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&nets[netid].lock, flags);

	if (found) {
		if (peer->res)
			rio_release_outb_dbell(rdev, peer->res);
		kfree(peer);
	}
}

static void rionet_get_drvinfo(struct net_device *ndev,
			       struct ethtool_drvinfo *info)
{
	struct rionet_private *rnet = netdev_priv(ndev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, "n/a", sizeof(info->fw_version));
	strlcpy(info->bus_info, rnet->mport->name, sizeof(info->bus_info));
}

static u32 rionet_get_msglevel(struct net_device *ndev)
{
	struct rionet_private *rnet = netdev_priv(ndev);

	return rnet->msg_enable;
}

static void rionet_set_msglevel(struct net_device *ndev, u32 value)
{
	struct rionet_private *rnet = netdev_priv(ndev);

	rnet->msg_enable = value;
}

static const struct ethtool_ops rionet_ethtool_ops = {
	.get_drvinfo = rionet_get_drvinfo,
	.get_msglevel = rionet_get_msglevel,
	.set_msglevel = rionet_set_msglevel,
	.get_link = ethtool_op_get_link,
};

static const struct net_device_ops rionet_netdev_ops = {
	.ndo_open		= rionet_open,
	.ndo_stop		= rionet_close,
	.ndo_start_xmit		= rionet_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
};

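/*
 * Allocate the per-mport table of active peers (indexed by destID), derive
 * the interface MAC address from the local destID and register the
 * net_device.
 */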
static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
{
	int rc = 0;
	struct rionet_private *rnet;
	u16 device_id;
	const size_t rionet_active_bytes = sizeof(void *) *
				RIO_MAX_ROUTE_ENTRIES(mport->sys_size);

	nets[mport->id].active = (struct rio_dev **)__get_free_pages(GFP_KERNEL,
						get_order(rionet_active_bytes));
	if (!nets[mport->id].active) {
		rc = -ENOMEM;
		goto out;
	}
	memset((void *)nets[mport->id].active, 0, rionet_active_bytes);

	/* Set up private area */
	rnet = netdev_priv(ndev);
	rnet->mport = mport;
	rnet->open = false;

	/* Set the default MAC address */
	device_id = rio_local_get_device_id(mport);
	ndev->dev_addr[0] = 0x00;
	ndev->dev_addr[1] = 0x01;
	ndev->dev_addr[2] = 0x00;
	ndev->dev_addr[3] = 0x01;
	ndev->dev_addr[4] = device_id >> 8;
	ndev->dev_addr[5] = device_id & 0xff;

	ndev->netdev_ops = &rionet_netdev_ops;
	ndev->mtu = RIONET_MAX_MTU;
	/* MTU range: 68 - 4082 */
	ndev->min_mtu = ETH_MIN_MTU;
	ndev->max_mtu = RIONET_MAX_MTU;
	ndev->features = NETIF_F_LLTX;
	SET_NETDEV_DEV(ndev, &mport->dev);
	ndev->ethtool_ops = &rionet_ethtool_ops;

	spin_lock_init(&rnet->lock);
	spin_lock_init(&rnet->tx_lock);

	rnet->msg_enable = RIONET_DEFAULT_MSGLEVEL;

	rc = register_netdev(ndev);
	if (rc != 0) {
		free_pages((unsigned long)nets[mport->id].active,
			   get_order(rionet_active_bytes));
		goto out;
	}

	printk(KERN_INFO "%s: %s %s Version %s, MAC %pM, %s\n",
	       ndev->name,
	       DRV_NAME,
	       DRV_DESC,
	       DRV_VERSION,
	       ndev->dev_addr,
	       mport->name);

out:
	return rc;
}

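/*
 * subsys_interface add callback, invoked for every RapidIO device found on
 * the bus.  The first device reported on a given net triggers creation of
 * the rionet netdev for that mport (provided the local port is messaging
 * capable); each remote device with mailbox and doorbell support is then
 * added to the peer list.
 */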
static int rionet_add_dev(struct device *dev, struct subsys_interface *sif)
{
	int rc = -ENODEV;
	u32 lsrc_ops, ldst_ops;
	struct rionet_peer *peer;
	struct net_device *ndev = NULL;
	struct rio_dev *rdev = to_rio_dev(dev);
	unsigned char netid = rdev->net->hport->id;

	if (netid >= RIONET_MAX_NETS)
		return rc;

	/*
	 * If first time through this net, make sure local device is rionet
	 * capable and setup netdev (this step will be skipped in later probes
	 * on the same net).
	 */
	if (!nets[netid].ndev) {
		rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR,
					 &lsrc_ops);
		rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR,
					 &ldst_ops);
		if (!is_rionet_capable(lsrc_ops, ldst_ops)) {
			printk(KERN_ERR
			       "%s: local device %s is not network capable\n",
			       DRV_NAME, rdev->net->hport->name);
			goto out;
		}

		/* Allocate our net_device structure */
		ndev = alloc_etherdev(sizeof(struct rionet_private));
		if (ndev == NULL) {
			rc = -ENOMEM;
			goto out;
		}

		rc = rionet_setup_netdev(rdev->net->hport, ndev);
		if (rc) {
			printk(KERN_ERR "%s: failed to setup netdev (rc=%d)\n",
			       DRV_NAME, rc);
			free_netdev(ndev);
			goto out;
		}

		INIT_LIST_HEAD(&nets[netid].peers);
		spin_lock_init(&nets[netid].lock);
		nets[netid].nact = 0;
		nets[netid].ndev = ndev;
	}

	/*
	 * If the remote device has mailbox/doorbell capabilities,
	 * add it to the peer list.
	 */
	if (dev_rionet_capable(rdev)) {
		struct rionet_private *rnet;
		unsigned long flags;

		rnet = netdev_priv(nets[netid].ndev);

		peer = kzalloc(sizeof(*peer), GFP_KERNEL);
		if (!peer) {
			rc = -ENOMEM;
			goto out;
		}
		peer->rdev = rdev;
		peer->res = rio_request_outb_dbell(peer->rdev,
						RIONET_DOORBELL_JOIN,
						RIONET_DOORBELL_LEAVE);
		if (!peer->res) {
			pr_err("%s: error requesting doorbells\n", DRV_NAME);
			kfree(peer);
			rc = -ENOMEM;
			goto out;
		}

		spin_lock_irqsave(&nets[netid].lock, flags);
		list_add_tail(&peer->node, &nets[netid].peers);
		spin_unlock_irqrestore(&nets[netid].lock, flags);
		pr_debug("%s: %s add peer %s\n",
			 DRV_NAME, __func__, rio_name(rdev));

		/* If netdev is already opened, send join request to new peer */
		if (rnet->open)
			rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
	}

	return 0;
out:
	return rc;
}

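/*
 * Reboot notifier: before the system goes down, send a LEAVE doorbell to
 * every active peer on every net so remote nodes stop targeting this one.
 */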
static int rionet_shutdown(struct notifier_block *nb, unsigned long code,
			   void *unused)
{
	struct rionet_peer *peer;
	unsigned long flags;
	int i;

	pr_debug("%s: %s\n", DRV_NAME, __func__);

	for (i = 0; i < RIONET_MAX_NETS; i++) {
		if (!nets[i].ndev)
			continue;

		spin_lock_irqsave(&nets[i].lock, flags);
		list_for_each_entry(peer, &nets[i].peers, node) {
			if (nets[i].active[peer->rdev->destid]) {
				rio_send_doorbell(peer->rdev,
						  RIONET_DOORBELL_LEAVE);
				nets[i].active[peer->rdev->destid] = NULL;
			}
		}
		spin_unlock_irqrestore(&nets[i].lock, flags);
	}

	return NOTIFY_DONE;
}

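/*
 * rio_mport class interface remove callback: the master port itself is
 * going away, so unregister and free its netdev and the active-peer table.
 */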
static void rionet_remove_mport(struct device *dev,
				struct class_interface *class_intf)
{
	struct rio_mport *mport = to_rio_mport(dev);
	struct net_device *ndev;
	int id = mport->id;

	pr_debug("%s %s\n", __func__, mport->name);

	WARN(nets[id].nact, "%s called when connected to %d peers\n",
	     __func__, nets[id].nact);
	WARN(!nets[id].ndev, "%s called for mport without NDEV\n",
	     __func__);

	if (nets[id].ndev) {
		ndev = nets[id].ndev;
		netif_stop_queue(ndev);
		unregister_netdev(ndev);

		free_pages((unsigned long)nets[id].active,
			   get_order(sizeof(void *) *
				     RIO_MAX_ROUTE_ENTRIES(mport->sys_size)));
		nets[id].active = NULL;
		free_netdev(ndev);
		nets[id].ndev = NULL;
	}
}

#ifdef MODULE
static struct rio_device_id rionet_id_table[] = {
	{RIO_DEVICE(RIO_ANY_ID, RIO_ANY_ID)},
	{ 0, }	/* terminate list */
};

MODULE_DEVICE_TABLE(rapidio, rionet_id_table);
#endif

static struct subsys_interface rionet_interface = {
	.name		= "rionet",
	.subsys		= &rio_bus_type,
	.add_dev	= rionet_add_dev,
	.remove_dev	= rionet_remove_dev,
};

static struct notifier_block rionet_notifier = {
	.notifier_call = rionet_shutdown,
};

/* the rio_mport_interface is used to handle local mport devices */
static struct class_interface rio_mport_interface __refdata = {
	.class = &rio_mport_class,
	.add_dev = NULL,
	.remove_dev = rionet_remove_mport,
};

static int __init rionet_init(void)
{
	int ret;

	ret = register_reboot_notifier(&rionet_notifier);
	if (ret) {
		pr_err("%s: failed to register reboot notifier (err=%d)\n",
		       DRV_NAME, ret);
		return ret;
	}

	ret = class_interface_register(&rio_mport_interface);
	if (ret) {
		pr_err("%s: class_interface_register error: %d\n",
		       DRV_NAME, ret);
		return ret;
	}

	return subsys_interface_register(&rionet_interface);
}

static void __exit rionet_exit(void)
{
	unregister_reboot_notifier(&rionet_notifier);
	subsys_interface_unregister(&rionet_interface);
	class_interface_unregister(&rio_mport_interface);
}

late_initcall(rionet_init);
module_exit(rionet_exit);