// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2012 Nicira, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/if_arp.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/llc.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/openvswitch.h>
#include <linux/export.h>

#include <net/ip_tunnels.h>
#include <net/rtnetlink.h>

#include "datapath.h"
#include "vport.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"

static struct vport_ops ovs_netdev_vport_ops;

/* Must be called with rcu_read_lock. */
static void netdev_port_receive(struct sk_buff *skb)
{
	struct vport *vport;

	vport = ovs_netdev_get_vport(skb->dev);
	if (unlikely(!vport))
		goto error;

	if (unlikely(skb_warn_if_lro(skb)))
		goto error;

	/* Make our own copy of the packet.  Otherwise we will mangle the
	 * packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
	 */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return;

	if (skb->dev->type == ARPHRD_ETHER) {
		skb_push(skb, ETH_HLEN);
		skb_postpush_rcsum(skb, skb->data, ETH_HLEN);
	}
	ovs_vport_receive(vport, skb, skb_tunnel_info(skb));
	return;
error:
	kfree_skb(skb);
}

/* Called with rcu_read_lock and bottom-halves disabled. */
static rx_handler_result_t netdev_frame_hook(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;

	if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
		return RX_HANDLER_PASS;

	netdev_port_receive(skb);
	return RX_HANDLER_CONSUMED;
}

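/* Returns the datapath's local (internal) device.  Callers must hold
 * ovs_mutex, as required by ovs_vport_ovsl().
 */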
static struct net_device *get_dpdev(const struct datapath *dp)
{
	struct vport *local;

	local = ovs_vport_ovsl(dp, OVSP_LOCAL);
	return local->dev;
}

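/* Attaches an existing network device to @vport: looks the device up by
 * name in the datapath's namespace, rejects loopback, internal and
 * non-Ethernet (other than ARPHRD_NONE) devices, links it below the
 * datapath's local device, registers netdev_frame_hook() as its rx
 * handler, disables LRO and puts the device into promiscuous mode.
 * On failure the vport itself is freed and an ERR_PTR() is returned.
 */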
struct vport *ovs_netdev_link(struct vport *vport, const char *name)
{
	int err;

	vport->dev = dev_get_by_name(ovs_dp_get_net(vport->dp), name);
	if (!vport->dev) {
		err = -ENODEV;
		goto error_free_vport;
	}

	if (vport->dev->flags & IFF_LOOPBACK ||
	    (vport->dev->type != ARPHRD_ETHER &&
	     vport->dev->type != ARPHRD_NONE) ||
	    ovs_is_internal_dev(vport->dev)) {
		err = -EINVAL;
		goto error_put;
	}

	rtnl_lock();
	err = netdev_master_upper_dev_link(vport->dev,
					   get_dpdev(vport->dp),
					   NULL, NULL, NULL);
	if (err)
		goto error_unlock;

	err = netdev_rx_handler_register(vport->dev, netdev_frame_hook,
					 vport);
	if (err)
		goto error_master_upper_dev_unlink;

	dev_disable_lro(vport->dev);
	dev_set_promiscuity(vport->dev, 1);
	vport->dev->priv_flags |= IFF_OVS_DATAPATH;
	rtnl_unlock();

	return vport;

error_master_upper_dev_unlink:
	netdev_upper_dev_unlink(vport->dev, get_dpdev(vport->dp));
error_unlock:
	rtnl_unlock();
error_put:
	dev_put(vport->dev);
error_free_vport:
	ovs_vport_free(vport);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(ovs_netdev_link);

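/* ->create() callback for plain netdev vports: allocates a vport with no
 * private area and attaches the device named in @parms via ovs_netdev_link().
 */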
static struct vport *netdev_create(const struct vport_parms *parms)
{
	struct vport *vport;

	vport = ovs_vport_alloc(0, &ovs_netdev_vport_ops, parms);
	if (IS_ERR(vport))
		return vport;

	return ovs_netdev_link(vport, parms->name);
}

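/* RCU callback: drops the reference on the underlying device (if any) and
 * frees the vport once no RCU readers can still be using it.
 */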
static void vport_netdev_free(struct rcu_head *rcu)
{
	struct vport *vport = container_of(rcu, struct vport, rcu);

	if (vport->dev)
		dev_put(vport->dev);
	ovs_vport_free(vport);
}

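/* Undoes the device side of ovs_netdev_link(): clears IFF_OVS_DATAPATH,
 * unregisters the rx handler, unlinks the device from its master and drops
 * the promiscuity count.  Must be called with RTNL held.
 */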
void ovs_netdev_detach_dev(struct vport *vport)
{
	ASSERT_RTNL();
	vport->dev->priv_flags &= ~IFF_OVS_DATAPATH;
	netdev_rx_handler_unregister(vport->dev);
	netdev_upper_dev_unlink(vport->dev,
				netdev_master_upper_dev_get(vport->dev));
	dev_set_promiscuity(vport->dev, -1);
}

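/* ->destroy() callback for plain netdev vports: detaches the device under
 * RTNL (if it is still an OVS port) and frees the vport after an RCU grace
 * period.
 */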
static void netdev_destroy(struct vport *vport)
{
	rtnl_lock();
	if (netif_is_ovs_port(vport->dev))
		ovs_netdev_detach_dev(vport);
	rtnl_unlock();

	call_rcu(&vport->rcu, vport_netdev_free);
}

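/* Shared ->destroy() helper for tunnel vports that are backed by a real
 * tunnel net_device (e.g. vxlan or geneve ports): detaches the device,
 * deletes the underlying link unless it is already being unregistered and
 * frees the vport after an RCU grace period.
 */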
void ovs_netdev_tunnel_destroy(struct vport *vport)
{
	rtnl_lock();
	if (netif_is_ovs_port(vport->dev))
		ovs_netdev_detach_dev(vport);

	/* We can be invoked by both explicit vport deletion and
	 * underlying netdev deregistration; delete the link only
	 * if it's not already shutting down.
	 */
	if (vport->dev->reg_state == NETREG_REGISTERED)
		rtnl_delete_link(vport->dev);
	dev_put(vport->dev);
	vport->dev = NULL;
	rtnl_unlock();

	call_rcu(&vport->rcu, vport_netdev_free);
}
EXPORT_SYMBOL_GPL(ovs_netdev_tunnel_destroy);

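/* Illustrative sketch (not part of this file): a tunnel vport implementation
 * built on these helpers typically creates its tunnel net_device, hands it
 * to ovs_netdev_link() and uses ovs_netdev_tunnel_destroy() for teardown.
 * The names below are hypothetical, loosely modeled on in-tree users such
 * as vport-vxlan.c:
 *
 *	static struct vport *foo_create(const struct vport_parms *parms)
 *	{
 *		struct vport *vport;
 *
 *		vport = ovs_vport_alloc(0, &ovs_foo_vport_ops, parms);
 *		if (IS_ERR(vport))
 *			return vport;
 *		(create the underlying foo tunnel net_device here, then
 *		 attach it:)
 *		return ovs_netdev_link(vport, parms->name);
 *	}
 *
 *	static struct vport_ops ovs_foo_vport_ops = {
 *		.type	 = OVS_VPORT_TYPE_FOO,
 *		.create	 = foo_create,
 *		.destroy = ovs_netdev_tunnel_destroy,
 *		.send	 = dev_queue_xmit,
 *	};
 */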
/* Returns NULL if this device is not attached to a datapath. */
struct vport *ovs_netdev_get_vport(struct net_device *dev)
{
	if (likely(netif_is_ovs_port(dev)))
		return (struct vport *)
			rcu_dereference_rtnl(dev->rx_handler_data);
	else
		return NULL;
}

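/* vport_ops for OVS_VPORT_TYPE_NETDEV ports: packets are transmitted by
 * handing them straight to dev_queue_xmit() on the attached device.
 */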
static struct vport_ops ovs_netdev_vport_ops = {
	.type		= OVS_VPORT_TYPE_NETDEV,
	.create		= netdev_create,
	.destroy	= netdev_destroy,
	.send		= dev_queue_xmit,
};

int __init ovs_netdev_init(void)
{
	return ovs_vport_ops_register(&ovs_netdev_vport_ops);
}

void ovs_netdev_exit(void)
{
	ovs_vport_ops_unregister(&ovs_netdev_vport_ops);
}