Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * vxcan.c - Virtual CAN Tunnel for cross namespace communication
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * This code is derived from drivers/net/can/vcan.c for the virtual CAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  * specific parts and from drivers/net/veth.c to implement the netlink API
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  * for network interface pairs in a common and established way.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  * Copyright (c) 2017 Oliver Hartkopp <socketcan@hartkopp.net>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <linux/netdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <linux/if_arp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <linux/if_ether.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <linux/can.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <linux/can/dev.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include <linux/can/skb.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #include <linux/can/vxcan.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #include <linux/can/can-ml.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) #include <net/rtnetlink.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) #define DRV_NAME "vxcan"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) MODULE_DESCRIPTION("Virtual CAN Tunnel");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) MODULE_AUTHOR("Oliver Hartkopp <socketcan@hartkopp.net>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) MODULE_ALIAS_RTNL_LINK(DRV_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) 
/* Per-device private data of one tunnel endpoint.
 * @peer: RCU-protected pointer to the other end of the pair; set to NULL
 *        in vxcan_dellink() once the peer is being torn down.
 */
struct vxcan_priv {
	struct net_device __rcu	*peer;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) 
/* Transmit on one end of the tunnel: convert the skb into an echo skb and
 * inject it into the peer's receive path. The peer pointer is read under
 * rcu_read_lock() so the peer device cannot go away while it is used.
 * Always returns NETDEV_TX_OK; undeliverable frames are dropped.
 */
static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
	struct net_device_stats *peerstats, *srcstats = &dev->stats;
	u8 len;

	/* drop skbs that do not hold a valid CAN / CAN FD frame */
	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (unlikely(!peer)) {
		/* peer already unlinked (vxcan_dellink) - nowhere to forward */
		kfree_skb(skb);
		dev->stats.tx_dropped++;
		goto out_unlock;
	}

	/* get an skb we own and may modify; NULL means it was dropped */
	skb = can_create_echo_skb(skb);
	if (!skb)
		goto out_unlock;

	/* reset CAN GW hop counter (stored in the unused csum_start field) */
	skb->csum_start = 0;
	skb->pkt_type   = PACKET_BROADCAST;
	skb->dev        = peer;
	skb->ip_summed  = CHECKSUM_UNNECESSARY;

	/* cache the frame length now: once netif_rx_ni() has queued the
	 * skb it may be freed, and cfd points into its data area
	 */
	len = cfd->len;
	if (netif_rx_ni(skb) == NET_RX_SUCCESS) {
		/* count the frame as TX on this side and RX on the peer */
		srcstats->tx_packets++;
		srcstats->tx_bytes += len;
		peerstats = &peer->stats;
		peerstats->rx_packets++;
		peerstats->rx_bytes += len;
	}

out_unlock:
	rcu_read_unlock();
	return NETDEV_TX_OK;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) static int vxcan_open(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 	struct vxcan_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 	struct net_device *peer = rtnl_dereference(priv->peer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 	if (!peer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) 		return -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) 	if (peer->flags & IFF_UP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 		netif_carrier_on(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) 		netif_carrier_on(peer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) static int vxcan_close(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 	struct vxcan_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) 	struct net_device *peer = rtnl_dereference(priv->peer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 	netif_carrier_off(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 	if (peer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 		netif_carrier_off(peer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) static int vxcan_get_iflink(const struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 	struct vxcan_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 	struct net_device *peer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 	int iflink;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 	peer = rcu_dereference(priv->peer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 	iflink = peer ? peer->ifindex : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 	return iflink;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) static int vxcan_change_mtu(struct net_device *dev, int new_mtu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 	/* Do not allow changing the MTU while running */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) 	if (dev->flags & IFF_UP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 	if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 	dev->mtu = new_mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 
/* Netdev callbacks for one vxcan tunnel endpoint. */
static const struct net_device_ops vxcan_netdev_ops = {
	.ndo_open	= vxcan_open,
	.ndo_stop	= vxcan_close,
	.ndo_start_xmit	= vxcan_xmit,
	.ndo_get_iflink	= vxcan_get_iflink,
	.ndo_change_mtu = vxcan_change_mtu,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) 
/* rtnl setup callback: initialize a freshly allocated vxcan net_device.
 * CAN devices carry no link-layer addresses and need no ARP; the default
 * MTU allows CAN FD frames.
 */
static void vxcan_setup(struct net_device *dev)
{
	struct can_ml_priv *can_ml;

	dev->type		= ARPHRD_CAN;
	dev->mtu		= CANFD_MTU;
	dev->hard_header_len	= 0;
	dev->addr_len		= 0;
	dev->tx_queue_len	= 0;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &vxcan_netdev_ops;
	dev->needs_free_netdev	= true;	/* freed by the core, no priv_destructor */

	/* The CAN midlayer private area lives right behind the (aligned)
	 * vxcan_priv inside the netdev private area - this layout must
	 * match priv_size in vxcan_link_ops below.
	 */
	can_ml = netdev_priv(dev) + ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN);
	can_set_ml_priv(dev, can_ml);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) /* forward declaration for rtnl_create_link() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) static struct rtnl_link_ops vxcan_link_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 
/* rtnl newlink callback: create a vxcan pair.
 * The peer device is created and registered first - possibly in another
 * network namespace when the nested VXCAN_INFO_PEER attribute carries
 * IFLA_NET_NS_* attributes - then the primary device @dev, and finally
 * the two are cross-linked through their RCU peer pointers.
 * Returns 0 on success or a negative errno.
 */
static int vxcan_newlink(struct net *net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct vxcan_priv *priv;
	struct net_device *peer;
	struct net *peer_net;

	struct nlattr *peer_tb[IFLA_MAX + 1], **tbp = tb;
	char ifname[IFNAMSIZ];
	unsigned char name_assign_type;
	struct ifinfomsg *ifmp = NULL;
	int err;

	/* register peer device */
	if (data && data[VXCAN_INFO_PEER]) {
		struct nlattr *nla_peer;

		/* VXCAN_INFO_PEER nests a full ifinfomsg header followed by
		 * IFLA_* attributes describing the peer; parse those into
		 * peer_tb and use them instead of tb from here on.
		 */
		nla_peer = data[VXCAN_INFO_PEER];
		ifmp = nla_data(nla_peer);
		err = rtnl_nla_parse_ifla(peer_tb,
					  nla_data(nla_peer) +
					  sizeof(struct ifinfomsg),
					  nla_len(nla_peer) -
					  sizeof(struct ifinfomsg),
					  NULL);
		if (err < 0)
			return err;

		tbp = peer_tb;
	}

	/* "%%d" leaves a literal "%d" in ifname, which the device
	 * registration code substitutes with the next free index */
	if (ifmp && tbp[IFLA_IFNAME]) {
		nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
		name_assign_type = NET_NAME_USER;
	} else {
		snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
		name_assign_type = NET_NAME_ENUM;
	}

	/* resolve the peer's target namespace (takes a reference) */
	peer_net = rtnl_link_get_net(net, tbp);
	if (IS_ERR(peer_net))
		return PTR_ERR(peer_net);

	peer = rtnl_create_link(peer_net, ifname, name_assign_type,
				&vxcan_link_ops, tbp, extack);
	if (IS_ERR(peer)) {
		put_net(peer_net);
		return PTR_ERR(peer);
	}

	if (ifmp && dev->ifindex)
		peer->ifindex = ifmp->ifi_index;

	err = register_netdevice(peer);
	put_net(peer_net);
	peer_net = NULL;	/* reference dropped - must not be used below */
	if (err < 0) {
		free_netdev(peer);
		return err;
	}

	netif_carrier_off(peer);

	err = rtnl_configure_link(peer, ifmp);
	if (err < 0)
		goto unregister_network_device;

	/* register first device */
	if (tb[IFLA_IFNAME])
		nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");

	err = register_netdevice(dev);
	if (err < 0)
		goto unregister_network_device;

	netif_carrier_off(dev);

	/* cross link the device pair */
	priv = netdev_priv(dev);
	rcu_assign_pointer(priv->peer, peer);

	priv = netdev_priv(peer);
	rcu_assign_pointer(priv->peer, dev);

	return 0;

unregister_network_device:
	/* only the peer needs unwinding here; dev was never (successfully)
	 * registered, so the rtnl core releases it on error - NOTE(review):
	 * mirrors the veth error path, confirm against the core newlink code
	 */
	unregister_netdevice(peer);
	return err;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) 
/* rtnl dellink callback: tear down one end of a pair. Both RCU peer
 * pointers are cleared and both devices are queued for unregistration;
 * the peer may already be NULL if it was deleted first.
 */
static void vxcan_dellink(struct net_device *dev, struct list_head *head)
{
	struct vxcan_priv *priv;
	struct net_device *peer;

	priv = netdev_priv(dev);
	peer = rtnl_dereference(priv->peer);

	/* Note : dellink() is called from default_device_exit_batch(),
	 * before a rcu_synchronize() point. The devices are guaranteed
	 * not being freed before one RCU grace period.
	 */
	RCU_INIT_POINTER(priv->peer, NULL);
	unregister_netdevice_queue(dev, head);

	if (peer) {
		/* break the back-link before the peer goes away too */
		priv = netdev_priv(peer);
		RCU_INIT_POINTER(priv->peer, NULL);
		unregister_netdevice_queue(peer, head);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) 
/* Netlink attribute policy: the only vxcan-specific attribute is the
 * nested peer definition, which must start with a struct ifinfomsg. */
static const struct nla_policy vxcan_policy[VXCAN_INFO_MAX + 1] = {
	[VXCAN_INFO_PEER] = { .len = sizeof(struct ifinfomsg) },
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) static struct net *vxcan_get_link_net(const struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) 	struct vxcan_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) 	struct net_device *peer = rtnl_dereference(priv->peer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) 	return peer ? dev_net(peer) : dev_net(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) 
/* rtnl link type for "ip link add ... type vxcan".
 * priv_size reserves room for vxcan_priv plus the trailing (aligned)
 * can_ml_priv area that vxcan_setup() hands to can_set_ml_priv().
 */
static struct rtnl_link_ops vxcan_link_ops = {
	.kind		= DRV_NAME,
	.priv_size	= ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN) + sizeof(struct can_ml_priv),
	.setup		= vxcan_setup,
	.newlink	= vxcan_newlink,
	.dellink	= vxcan_dellink,
	.policy		= vxcan_policy,
	.maxtype	= VXCAN_INFO_MAX,
	.get_link_net	= vxcan_get_link_net,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) static __init int vxcan_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) 	pr_info("vxcan: Virtual CAN Tunnel driver\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) 	return rtnl_link_register(&vxcan_link_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) 
/* Module exit: unregister the link type, which also removes all
 * existing vxcan device pairs. */
static __exit void vxcan_exit(void)
{
	rtnl_link_unregister(&vxcan_link_ops);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) module_init(vxcan_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) module_exit(vxcan_exit);