// SPDX-License-Identifier: GPL-2.0-only
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/sysctl.h>
#include <linux/net.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/mpls.h>
#include <linux/netconf.h>
#include <linux/nospec.h>
#include <linux/vmalloc.h>
#include <linux/percpu.h>
#include <net/ip.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/ip_fib.h>
#include <net/netevent.h>
#include <net/ip_tunnels.h>
#include <net/netns/generic.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#endif
#include <net/ipv6_stubs.h>
#include <net/rtnh.h>
#include "internal.h"

/* Maximum amount of memory we will use for an mpls_route */
#define MAX_MPLS_ROUTE_MEM	4096

/* Maximum number of labels to look ahead at when selecting a path of
 * a multipath route
 */
#define MAX_MP_SELECT_LABELS 4

#define MPLS_NEIGH_TABLE_UNSPEC (NEIGH_LINK_TABLE + 1)

static int label_limit = (1 << 20) - 1;
static int ttl_max = 255;

#if IS_ENABLED(CONFIG_NET_IP_TUNNEL)
static size_t ipgre_mpls_encap_hlen(struct ip_tunnel_encap *e)
{
	return sizeof(struct mpls_shim_hdr);
}

static const struct ip_tunnel_encap_ops mpls_iptun_ops = {
	.encap_hlen	= ipgre_mpls_encap_hlen,
};

static int ipgre_tunnel_encap_add_mpls_ops(void)
{
	return ip_tunnel_encap_add_ops(&mpls_iptun_ops, TUNNEL_ENCAP_MPLS);
}

static void ipgre_tunnel_encap_del_mpls_ops(void)
{
	ip_tunnel_encap_del_ops(&mpls_iptun_ops, TUNNEL_ENCAP_MPLS);
}
#else
static int ipgre_tunnel_encap_add_mpls_ops(void)
{
	return 0;
}

static void ipgre_tunnel_encap_del_mpls_ops(void)
{
}
#endif

static void rtmsg_lfib(int event, u32 label, struct mpls_route *rt,
		       struct nlmsghdr *nlh, struct net *net, u32 portid,
		       unsigned int nlm_flags);

static struct mpls_route *mpls_route_input_rcu(struct net *net, unsigned index)
{
	struct mpls_route *rt = NULL;

	if (index < net->mpls.platform_labels) {
		struct mpls_route __rcu **platform_label =
			rcu_dereference(net->mpls.platform_label);
		rt = rcu_dereference(platform_label[index]);
	}
	return rt;
}

bool mpls_output_possible(const struct net_device *dev)
{
	return dev && (dev->flags & IFF_UP) && netif_carrier_ok(dev);
}
EXPORT_SYMBOL_GPL(mpls_output_possible);

static u8 *__mpls_nh_via(struct mpls_route *rt, struct mpls_nh *nh)
{
	return (u8 *)nh + rt->rt_via_offset;
}

static const u8 *mpls_nh_via(const struct mpls_route *rt,
			     const struct mpls_nh *nh)
{
	return __mpls_nh_via((struct mpls_route *)rt, (struct mpls_nh *)nh);
}

static unsigned int mpls_nh_header_size(const struct mpls_nh *nh)
{
	/* The size of the layer 2.5 labels to be added for this route */
	return nh->nh_labels * sizeof(struct mpls_shim_hdr);
}
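
/* Example for mpls_nh_header_size() above: a nexthop that pushes three
 * labels needs 3 * sizeof(struct mpls_shim_hdr) = 12 bytes of extra
 * headroom on the outgoing skb.
 */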

unsigned int mpls_dev_mtu(const struct net_device *dev)
{
	/* The amount of data the layer 2 frame can hold */
	return dev->mtu;
}
EXPORT_SYMBOL_GPL(mpls_dev_mtu);

bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb->len <= mtu)
		return false;

	if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(mpls_pkt_too_big);

void mpls_stats_inc_outucastpkts(struct net_device *dev,
				 const struct sk_buff *skb)
{
	struct mpls_dev *mdev;

	if (skb->protocol == htons(ETH_P_MPLS_UC)) {
		mdev = mpls_dev_get(dev);
		if (mdev)
			MPLS_INC_STATS_LEN(mdev, skb->len,
					   tx_packets,
					   tx_bytes);
	} else if (skb->protocol == htons(ETH_P_IP)) {
		IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);
#if IS_ENABLED(CONFIG_IPV6)
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct inet6_dev *in6dev = __in6_dev_get(dev);

		if (in6dev)
			IP6_UPD_PO_STATS(dev_net(dev), in6dev,
					 IPSTATS_MIB_OUT, skb->len);
#endif
	}
}
EXPORT_SYMBOL_GPL(mpls_stats_inc_outucastpkts);

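/* Multipath hashing: each 32-bit MPLS shim entry carries a 20-bit
 * label, 3 bits of traffic class, a bottom-of-stack (S) bit and an
 * 8-bit TTL (RFC 3032).  The function below walks at most
 * MAX_MP_SELECT_LABELS entries of the stack, mixing every label from
 * the unreserved range (>= 16) into a jhash.  Reserved labels (0-15)
 * are skipped as hash keys per RFC 6790, except that seeing the
 * entropy label indicator (MPLS_LABEL_ENTROPY, label 7) means the very
 * next label is an entropy label and the walk can stop right after
 * hashing it.  At the bottom of the stack the inner IPv4 or IPv6
 * addresses and protocol/next header are mixed in as well, provided
 * enough of the packet can be pulled from the skb.
 */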
static u32 mpls_multipath_hash(struct mpls_route *rt, struct sk_buff *skb)
{
	struct mpls_entry_decoded dec;
	unsigned int mpls_hdr_len = 0;
	struct mpls_shim_hdr *hdr;
	bool eli_seen = false;
	int label_index;
	u32 hash = 0;

	for (label_index = 0; label_index < MAX_MP_SELECT_LABELS;
	     label_index++) {
		mpls_hdr_len += sizeof(*hdr);
		if (!pskb_may_pull(skb, mpls_hdr_len))
			break;

		/* Read and decode the current label */
		hdr = mpls_hdr(skb) + label_index;
		dec = mpls_entry_decode(hdr);

		/* RFC6790 - reserved labels MUST NOT be used as keys
		 * for the load-balancing function
		 */
		if (likely(dec.label >= MPLS_LABEL_FIRST_UNRESERVED)) {
			hash = jhash_1word(dec.label, hash);

			/* The entropy label follows the entropy label
			 * indicator, so this means that the entropy
			 * label was just added to the hash - no need to
			 * go any deeper either in the label stack or in the
			 * payload
			 */
			if (eli_seen)
				break;
		} else if (dec.label == MPLS_LABEL_ENTROPY) {
			eli_seen = true;
		}

		if (!dec.bos)
			continue;

		/* Found the bottom label; does the skb have room for
		 * an IPv4 header?
		 */
		if (pskb_may_pull(skb, mpls_hdr_len + sizeof(struct iphdr))) {
			const struct iphdr *v4hdr;

			v4hdr = (const struct iphdr *)(hdr + 1);
			if (v4hdr->version == 4) {
				hash = jhash_3words(ntohl(v4hdr->saddr),
						    ntohl(v4hdr->daddr),
						    v4hdr->protocol, hash);
			} else if (v4hdr->version == 6 &&
				   pskb_may_pull(skb, mpls_hdr_len +
						 sizeof(struct ipv6hdr))) {
				const struct ipv6hdr *v6hdr;

				v6hdr = (const struct ipv6hdr *)(hdr + 1);
				hash = __ipv6_addr_jhash(&v6hdr->saddr, hash);
				hash = __ipv6_addr_jhash(&v6hdr->daddr, hash);
				hash = jhash_1word(v6hdr->nexthdr, hash);
			}
		}

		break;
	}

	return hash;
}

static struct mpls_nh *mpls_get_nexthop(struct mpls_route *rt, u8 index)
{
	return (struct mpls_nh *)((u8 *)rt->rt_nh + index * rt->rt_nh_size);
}

/* number of alive nexthops (rt->rt_nhn_alive) and the flags for
 * a next hop (nh->nh_flags) are modified by netdev event handlers.
 * Since those fields can change at any moment, use READ_ONCE to
 * access both.
 */
static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt,
					     struct sk_buff *skb)
{
	u32 hash = 0;
	int nh_index = 0;
	int n = 0;
	u8 alive;

	/* No need to look further into packet if there's only
	 * one path
	 */
	if (rt->rt_nhn == 1)
		return rt->rt_nh;

	alive = READ_ONCE(rt->rt_nhn_alive);
	if (alive == 0)
		return NULL;

	hash = mpls_multipath_hash(rt, skb);
	nh_index = hash % alive;
	if (alive == rt->rt_nhn)
		goto out;
	for_nexthops(rt) {
		unsigned int nh_flags = READ_ONCE(nh->nh_flags);

		if (nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
			continue;
		if (n == nh_index)
			return nh;
		n++;
	} endfor_nexthops(rt);

out:
	return mpls_get_nexthop(rt, nh_index);
}
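
/* Example for mpls_select_multipath() above: with rt_nhn == 3 and one
 * nexthop flagged RTNH_F_LINKDOWN, rt_nhn_alive is 2, so nh_index is
 * hash % 2 and the loop walks the nexthops, skipping dead or linkdown
 * entries, until it reaches the nh_index-th live one.  When every
 * nexthop is alive the index maps directly onto the nexthop array via
 * mpls_get_nexthop().
 */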

static bool mpls_egress(struct net *net, struct mpls_route *rt,
			struct sk_buff *skb, struct mpls_entry_decoded dec)
{
	enum mpls_payload_type payload_type;
	bool success = false;

	/* The IPv4 code below accesses through the IPv4 header
	 * checksum, which is 12 bytes into the packet.
	 * The IPv6 code below accesses through the IPv6 hop limit
	 * which is 8 bytes into the packet.
	 *
	 * For all supported cases there should always be at least 12
	 * bytes of packet data present.  The IPv4 header is 20 bytes
	 * without options and the IPv6 header is always 40 bytes
	 * long.
	 */
	if (!pskb_may_pull(skb, 12))
		return false;

	payload_type = rt->rt_payload_type;
	if (payload_type == MPT_UNSPEC)
		payload_type = ip_hdr(skb)->version;

	switch (payload_type) {
	case MPT_IPV4: {
		struct iphdr *hdr4 = ip_hdr(skb);
		u8 new_ttl;
		skb->protocol = htons(ETH_P_IP);

		/* If propagating TTL, take the decremented TTL from
		 * the incoming MPLS header, otherwise decrement the
		 * TTL, but only if not 0 to avoid underflow.
		 */
		if (rt->rt_ttl_propagate == MPLS_TTL_PROP_ENABLED ||
		    (rt->rt_ttl_propagate == MPLS_TTL_PROP_DEFAULT &&
		     net->mpls.ip_ttl_propagate))
			new_ttl = dec.ttl;
		else
			new_ttl = hdr4->ttl ? hdr4->ttl - 1 : 0;

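		/* Adjust the header checksum incrementally (RFC 1624):
		 * the TTL shares a 16-bit word with the protocol field,
		 * and since the protocol byte does not change it is
		 * enough to feed csum_replace2() the old and new TTL in
		 * the high byte.  For example, a TTL drop from 64 to 63
		 * replaces 0x4000 with 0x3f00, and the checksum is
		 * updated by the same one's-complement difference.
		 */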
		csum_replace2(&hdr4->check,
			      htons(hdr4->ttl << 8),
			      htons(new_ttl << 8));
		hdr4->ttl = new_ttl;
		success = true;
		break;
	}
	case MPT_IPV6: {
		struct ipv6hdr *hdr6 = ipv6_hdr(skb);
		skb->protocol = htons(ETH_P_IPV6);

		/* If propagating TTL, take the decremented TTL from
		 * the incoming MPLS header, otherwise decrement the
		 * hop limit, but only if not 0 to avoid underflow.
		 */
		if (rt->rt_ttl_propagate == MPLS_TTL_PROP_ENABLED ||
		    (rt->rt_ttl_propagate == MPLS_TTL_PROP_DEFAULT &&
		     net->mpls.ip_ttl_propagate))
			hdr6->hop_limit = dec.ttl;
		else if (hdr6->hop_limit)
			hdr6->hop_limit = hdr6->hop_limit - 1;
		success = true;
		break;
	}
	case MPT_UNSPEC:
		/* Should have decided which protocol it is by now */
		break;
	}

	return success;
}

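/* Receive path for ETH_P_MPLS_UC packets: decode the outermost shim
 * entry, look the label up in the per-namespace platform_label table,
 * pick a nexthop (hashing across paths for multipath routes), pop the
 * label, decrement the TTL, and then either push the nexthop's
 * outgoing labels or, when the popped label was the bottom of stack
 * and nothing is pushed, hand the bare IP packet to mpls_egress().
 * Finally the packet is transmitted with neigh_xmit(), using either
 * the configured via address or the output device's own address.
 */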
static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
			struct packet_type *pt, struct net_device *orig_dev)
{
	struct net *net = dev_net(dev);
	struct mpls_shim_hdr *hdr;
	struct mpls_route *rt;
	struct mpls_nh *nh;
	struct mpls_entry_decoded dec;
	struct net_device *out_dev;
	struct mpls_dev *out_mdev;
	struct mpls_dev *mdev;
	unsigned int hh_len;
	unsigned int new_header_size;
	unsigned int mtu;
	int err;

	/* Careful: this entire function runs inside an RCU read-side
	 * critical section.
	 */

	mdev = mpls_dev_get(dev);
	if (!mdev)
		goto drop;

	MPLS_INC_STATS_LEN(mdev, skb->len, rx_packets,
			   rx_bytes);

	if (!mdev->input_enabled) {
		MPLS_INC_STATS(mdev, rx_dropped);
		goto drop;
	}

	if (skb->pkt_type != PACKET_HOST)
		goto err;

	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
		goto err;

	if (!pskb_may_pull(skb, sizeof(*hdr)))
		goto err;

	/* Read and decode the label */
	hdr = mpls_hdr(skb);
	dec = mpls_entry_decode(hdr);

	rt = mpls_route_input_rcu(net, dec.label);
	if (!rt) {
		MPLS_INC_STATS(mdev, rx_noroute);
		goto drop;
	}

	nh = mpls_select_multipath(rt, skb);
	if (!nh)
		goto err;

	/* Pop the label */
	skb_pull(skb, sizeof(*hdr));
	skb_reset_network_header(skb);

	skb_orphan(skb);

	if (skb_warn_if_lro(skb))
		goto err;

	skb_forward_csum(skb);

	/* Verify ttl is valid */
	if (dec.ttl <= 1)
		goto err;
	dec.ttl -= 1;

	/* Find the output device */
	out_dev = rcu_dereference(nh->nh_dev);
	if (!mpls_output_possible(out_dev))
		goto tx_err;

	/* Verify the destination can hold the packet */
	new_header_size = mpls_nh_header_size(nh);
	mtu = mpls_dev_mtu(out_dev);
	if (mpls_pkt_too_big(skb, mtu - new_header_size))
		goto tx_err;

	hh_len = LL_RESERVED_SPACE(out_dev);
	if (!out_dev->header_ops)
		hh_len = 0;

	/* Ensure there is enough space for the headers in the skb */
	if (skb_cow(skb, hh_len + new_header_size))
		goto tx_err;

	skb->dev = out_dev;
	skb->protocol = htons(ETH_P_MPLS_UC);

	if (unlikely(!new_header_size && dec.bos)) {
		/* Penultimate hop popping */
		if (!mpls_egress(dev_net(out_dev), rt, skb, dec))
			goto err;
	} else {
		bool bos;
		int i;
		skb_push(skb, new_header_size);
		skb_reset_network_header(skb);
		/* Push the new labels */
		hdr = mpls_hdr(skb);
		bos = dec.bos;
		for (i = nh->nh_labels - 1; i >= 0; i--) {
			hdr[i] = mpls_entry_encode(nh->nh_label[i],
						   dec.ttl, 0, bos);
			bos = false;
		}
	}

	mpls_stats_inc_outucastpkts(out_dev, skb);

	/* If via wasn't specified then send out using device address */
	if (nh->nh_via_table == MPLS_NEIGH_TABLE_UNSPEC)
		err = neigh_xmit(NEIGH_LINK_TABLE, out_dev,
				 out_dev->dev_addr, skb);
	else
		err = neigh_xmit(nh->nh_via_table, out_dev,
				 mpls_nh_via(rt, nh), skb);
	if (err)
		net_dbg_ratelimited("%s: packet transmission failed: %d\n",
				    __func__, err);
	return 0;

tx_err:
	out_mdev = out_dev ? mpls_dev_get(out_dev) : NULL;
	if (out_mdev)
		MPLS_INC_STATS(out_mdev, tx_errors);
	goto drop;
err:
	MPLS_INC_STATS(mdev, rx_errors);
drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}

static struct packet_type mpls_packet_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_MPLS_UC),
	.func = mpls_forward,
};

static const struct nla_policy rtm_mpls_policy[RTA_MAX+1] = {
	[RTA_DST]		= { .type = NLA_U32 },
	[RTA_OIF]		= { .type = NLA_U32 },
	[RTA_TTL_PROPAGATE]	= { .type = NLA_U8 },
};

struct mpls_route_config {
	u32			rc_protocol;
	u32			rc_ifindex;
	u8			rc_via_table;
	u8			rc_via_alen;
	u8			rc_via[MAX_VIA_ALEN];
	u32			rc_label;
	u8			rc_ttl_propagate;
	u8			rc_output_labels;
	u32			rc_output_label[MAX_NEW_LABELS];
	u32			rc_nlflags;
	enum mpls_payload_type	rc_payload_type;
	struct nl_info		rc_nlinfo;
	struct rtnexthop	*rc_mp;
	int			rc_mp_len;
};
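
/* struct mpls_route_config above is the parsed form of an
 * RTM_NEWROUTE/RTM_DELROUTE request: the incoming label, the via
 * address and table, the labels to push on output and, for multipath
 * routes, the raw RTA_MULTIPATH payload in rc_mp / rc_mp_len, which is
 * walked again when the nexthops are built.
 */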

/* All nexthops within a route have the same size, based on the
 * maximum number of labels and the maximum via address length across
 * the hops.
 */
static struct mpls_route *mpls_rt_alloc(u8 num_nh, u8 max_alen, u8 max_labels)
{
	u8 nh_size = MPLS_NH_SIZE(max_labels, max_alen);
	struct mpls_route *rt;
	size_t size;

	size = sizeof(*rt) + num_nh * nh_size;
	if (size > MAX_MPLS_ROUTE_MEM)
		return ERR_PTR(-EINVAL);

	rt = kzalloc(size, GFP_KERNEL);
	if (!rt)
		return ERR_PTR(-ENOMEM);

	rt->rt_nhn = num_nh;
	rt->rt_nhn_alive = num_nh;
	rt->rt_nh_size = nh_size;
	rt->rt_via_offset = MPLS_NH_VIA_OFF(max_labels);

	return rt;
}
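
/* Rough memory layout of the allocation above: struct mpls_route is
 * followed by num_nh nexthop slots of rt_nh_size bytes each.  A slot
 * holds the fixed struct mpls_nh, then room for up to max_labels
 * outgoing labels, and finally the via address at rt_via_offset
 * (see MPLS_NH_SIZE() and MPLS_NH_VIA_OFF() in internal.h).
 */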

static void mpls_rt_free(struct mpls_route *rt)
{
	if (rt)
		kfree_rcu(rt, rt_rcu);
}

static void mpls_notify_route(struct net *net, unsigned index,
			      struct mpls_route *old, struct mpls_route *new,
			      const struct nl_info *info)
{
	struct nlmsghdr *nlh = info ? info->nlh : NULL;
	unsigned portid = info ? info->portid : 0;
	int event = new ? RTM_NEWROUTE : RTM_DELROUTE;
	struct mpls_route *rt = new ? new : old;
	unsigned nlm_flags = (old && new) ? NLM_F_REPLACE : 0;
	/* Ignore reserved labels for now */
	if (rt && (index >= MPLS_LABEL_FIRST_UNRESERVED))
		rtmsg_lfib(event, index, rt, nlh, net, portid, nlm_flags);
}

static void mpls_route_update(struct net *net, unsigned index,
			      struct mpls_route *new,
			      const struct nl_info *info)
{
	struct mpls_route __rcu **platform_label;
	struct mpls_route *rt;

	ASSERT_RTNL();

	platform_label = rtnl_dereference(net->mpls.platform_label);
	rt = rtnl_dereference(platform_label[index]);
	rcu_assign_pointer(platform_label[index], new);

	mpls_notify_route(net, index, rt, new, info);

	/* If we removed a route free it now */
	mpls_rt_free(rt);
}
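
/* mpls_route_update() above is the single point where a label's route
 * is published or withdrawn: rcu_assign_pointer() makes the new route
 * visible to mpls_route_input_rcu() readers, and the old route is only
 * reclaimed via kfree_rcu() after a grace period, so a forwarding path
 * that already dereferenced it can finish safely.
 */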

static unsigned find_free_label(struct net *net)
{
	struct mpls_route __rcu **platform_label;
	size_t platform_labels;
	unsigned index;

	platform_label = rtnl_dereference(net->mpls.platform_label);
	platform_labels = net->mpls.platform_labels;
	for (index = MPLS_LABEL_FIRST_UNRESERVED; index < platform_labels;
	     index++) {
		if (!rtnl_dereference(platform_label[index]))
			return index;
	}
	return LABEL_NOT_SPECIFIED;
}
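
/* The scan above starts at MPLS_LABEL_FIRST_UNRESERVED (16) because
 * labels 0-15 are reserved by RFC 3032 (explicit null, router alert,
 * implicit null, entropy label indicator, ...) and never get a
 * platform-label route of their own.
 */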

#if IS_ENABLED(CONFIG_INET)
static struct net_device *inet_fib_lookup_dev(struct net *net,
					      const void *addr)
{
	struct net_device *dev;
	struct rtable *rt;
	struct in_addr daddr;

	memcpy(&daddr, addr, sizeof(struct in_addr));
	rt = ip_route_output(net, daddr.s_addr, 0, 0, 0);
	if (IS_ERR(rt))
		return ERR_CAST(rt);

	dev = rt->dst.dev;
	dev_hold(dev);

	ip_rt_put(rt);

	return dev;
}
#else
static struct net_device *inet_fib_lookup_dev(struct net *net,
					      const void *addr)
{
	return ERR_PTR(-EAFNOSUPPORT);
}
#endif

#if IS_ENABLED(CONFIG_IPV6)
static struct net_device *inet6_fib_lookup_dev(struct net *net,
					       const void *addr)
{
	struct net_device *dev;
	struct dst_entry *dst;
	struct flowi6 fl6;

	if (!ipv6_stub)
		return ERR_PTR(-EAFNOSUPPORT);

	memset(&fl6, 0, sizeof(fl6));
	memcpy(&fl6.daddr, addr, sizeof(struct in6_addr));
	dst = ipv6_stub->ipv6_dst_lookup_flow(net, NULL, &fl6, NULL);
	if (IS_ERR(dst))
		return ERR_CAST(dst);

	dev = dst->dev;
	dev_hold(dev);
	dst_release(dst);

	return dev;
}
#else
static struct net_device *inet6_fib_lookup_dev(struct net *net,
					       const void *addr)
{
	return ERR_PTR(-EAFNOSUPPORT);
}
#endif

static struct net_device *find_outdev(struct net *net,
				      struct mpls_route *rt,
				      struct mpls_nh *nh, int oif)
{
	struct net_device *dev = NULL;

	if (!oif) {
		switch (nh->nh_via_table) {
		case NEIGH_ARP_TABLE:
			dev = inet_fib_lookup_dev(net, mpls_nh_via(rt, nh));
			break;
		case NEIGH_ND_TABLE:
			dev = inet6_fib_lookup_dev(net, mpls_nh_via(rt, nh));
			break;
		case NEIGH_LINK_TABLE:
			break;
		}
	} else {
		dev = dev_get_by_index(net, oif);
	}

	if (!dev)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(dev))
		return dev;

	/* The caller is holding rtnl anyway, so release the dev reference */
	dev_put(dev);

	return dev;
}
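
/* Example: for a route configured with, say, "via inet 10.0.0.2" and
 * no RTA_OIF, the output device is taken from an IPv4 FIB lookup on
 * the via address; with an explicit oif the device is simply looked up
 * by index, and a link-layer (AF_PACKET) via requires the oif.
 */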

static int mpls_nh_assign_dev(struct net *net, struct mpls_route *rt,
			      struct mpls_nh *nh, int oif)
{
	struct net_device *dev = NULL;
	int err = -ENODEV;

	dev = find_outdev(net, rt, nh, oif);
	if (IS_ERR(dev)) {
		err = PTR_ERR(dev);
		dev = NULL;
		goto errout;
	}

	/* Ensure this is a supported device */
	err = -EINVAL;
	if (!mpls_dev_get(dev))
		goto errout;

	if ((nh->nh_via_table == NEIGH_LINK_TABLE) &&
	    (dev->addr_len != nh->nh_via_alen))
		goto errout;

	RCU_INIT_POINTER(nh->nh_dev, dev);

	if (!(dev->flags & IFF_UP)) {
		nh->nh_flags |= RTNH_F_DEAD;
	} else {
		unsigned int flags;

		flags = dev_get_flags(dev);
		if (!(flags & (IFF_RUNNING | IFF_LOWER_UP)))
			nh->nh_flags |= RTNH_F_LINKDOWN;
	}

	return 0;

errout:
	return err;
}

static int nla_get_via(const struct nlattr *nla, u8 *via_alen, u8 *via_table,
		       u8 via_addr[], struct netlink_ext_ack *extack)
{
	struct rtvia *via = nla_data(nla);
	int err = -EINVAL;
	int alen;

	if (nla_len(nla) < offsetof(struct rtvia, rtvia_addr)) {
		NL_SET_ERR_MSG_ATTR(extack, nla,
				    "Invalid attribute length for RTA_VIA");
		goto errout;
	}
	alen = nla_len(nla) -
			offsetof(struct rtvia, rtvia_addr);
	if (alen > MAX_VIA_ALEN) {
		NL_SET_ERR_MSG_ATTR(extack, nla,
				    "Invalid address length for RTA_VIA");
		goto errout;
	}

	/* Validate the address family */
	switch (via->rtvia_family) {
	case AF_PACKET:
		*via_table = NEIGH_LINK_TABLE;
		break;
	case AF_INET:
		*via_table = NEIGH_ARP_TABLE;
		if (alen != 4)
			goto errout;
		break;
	case AF_INET6:
		*via_table = NEIGH_ND_TABLE;
		if (alen != 16)
			goto errout;
		break;
	default:
		/* Unsupported address family */
		goto errout;
	}

	memcpy(via_addr, via->rtvia_addr, alen);
	*via_alen = alen;
	err = 0;

errout:
	return err;
}
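
/* RTA_VIA carries a struct rtvia: a two-byte address family followed
 * directly by the raw address bytes, so the attribute payload is
 * 2 + 4 bytes for an IPv4 via and 2 + 16 bytes for an IPv6 via, and
 * the family selects which neighbour table the nexthop resolves in.
 */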

static int mpls_nh_build_from_cfg(struct mpls_route_config *cfg,
				  struct mpls_route *rt)
{
	struct net *net = cfg->rc_nlinfo.nl_net;
	struct mpls_nh *nh = rt->rt_nh;
	int err;
	int i;

	if (!nh)
		return -ENOMEM;

	nh->nh_labels = cfg->rc_output_labels;
	for (i = 0; i < nh->nh_labels; i++)
		nh->nh_label[i] = cfg->rc_output_label[i];

	nh->nh_via_table = cfg->rc_via_table;
	memcpy(__mpls_nh_via(rt, nh), cfg->rc_via, cfg->rc_via_alen);
	nh->nh_via_alen = cfg->rc_via_alen;

	err = mpls_nh_assign_dev(net, rt, nh, cfg->rc_ifindex);
	if (err)
		goto errout;

	if (nh->nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
		rt->rt_nhn_alive--;

	return 0;

errout:
	return err;
}

static int mpls_nh_build(struct net *net, struct mpls_route *rt,
			 struct mpls_nh *nh, int oif, struct nlattr *via,
			 struct nlattr *newdst, u8 max_labels,
			 struct netlink_ext_ack *extack)
{
	int err = -ENOMEM;

	if (!nh)
		goto errout;

	if (newdst) {
		err = nla_get_labels(newdst, max_labels, &nh->nh_labels,
				     nh->nh_label, extack);
		if (err)
			goto errout;
	}

	if (via) {
		err = nla_get_via(via, &nh->nh_via_alen, &nh->nh_via_table,
				  __mpls_nh_via(rt, nh), extack);
		if (err)
			goto errout;
	} else {
		nh->nh_via_table = MPLS_NEIGH_TABLE_UNSPEC;
	}

	err = mpls_nh_assign_dev(net, rt, nh, oif);
	if (err)
		goto errout;

	return 0;

errout:
	return err;
}

static u8 mpls_count_nexthops(struct rtnexthop *rtnh, int len,
			      u8 cfg_via_alen, u8 *max_via_alen,
			      u8 *max_labels)
{
	int remaining = len;
	u8 nhs = 0;

	*max_via_alen = 0;
	*max_labels = 0;

	while (rtnh_ok(rtnh, remaining)) {
		struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
		int attrlen;
		u8 n_labels = 0;

		attrlen = rtnh_attrlen(rtnh);
		nla = nla_find(attrs, attrlen, RTA_VIA);
		if (nla && nla_len(nla) >=
		    offsetof(struct rtvia, rtvia_addr)) {
			int via_alen = nla_len(nla) -
				offsetof(struct rtvia, rtvia_addr);

			if (via_alen <= MAX_VIA_ALEN)
				*max_via_alen = max_t(u16, *max_via_alen,
						      via_alen);
		}

		nla = nla_find(attrs, attrlen, RTA_NEWDST);
		if (nla &&
		    nla_get_labels(nla, MAX_NEW_LABELS, &n_labels,
				   NULL, NULL) != 0)
			return 0;

		*max_labels = max_t(u8, *max_labels, n_labels);

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) /* number of nexthops is tracked by a u8.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) * Check for overflow.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) if (nhs == 255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) nhs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) rtnh = rtnh_next(rtnh, &remaining);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) /* leftover implies invalid nexthop configuration, discard it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) return remaining > 0 ? 0 : nhs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
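/* Fill in every nexthop of a multipath route from cfg->rc_mp.  Per-nexthop
 * weights and flags are not supported and cause the request to be rejected.
 */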
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) static int mpls_nh_build_multi(struct mpls_route_config *cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) struct mpls_route *rt, u8 max_labels,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) struct rtnexthop *rtnh = cfg->rc_mp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) struct nlattr *nla_via, *nla_newdst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) int remaining = cfg->rc_mp_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) u8 nhs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) change_nexthops(rt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) int attrlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) nla_via = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) nla_newdst = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) if (!rtnh_ok(rtnh, remaining))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) /* neither weighted multipath nor any flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) * are supported
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) if (rtnh->rtnh_hops || rtnh->rtnh_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) attrlen = rtnh_attrlen(rtnh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) if (attrlen > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) struct nlattr *attrs = rtnh_attrs(rtnh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) nla_via = nla_find(attrs, attrlen, RTA_VIA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) nla_newdst = nla_find(attrs, attrlen, RTA_NEWDST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) err = mpls_nh_build(cfg->rc_nlinfo.nl_net, rt, nh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) rtnh->rtnh_ifindex, nla_via, nla_newdst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) max_labels, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) if (nh->nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) rt->rt_nhn_alive--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) rtnh = rtnh_next(rtnh, &remaining);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) nhs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) } endfor_nexthops(rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) rt->rt_nhn = nhs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) errout:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
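/* Validate a label index supplied by userspace: reserved labels and labels
 * beyond the configured platform_labels table are rejected, and the index
 * is sanitized with array_index_nospec() before it is used as an array
 * index.
 */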
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) static bool mpls_label_ok(struct net *net, unsigned int *index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) bool is_ok = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) /* Reserved labels may not be set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) if (*index < MPLS_LABEL_FIRST_UNRESERVED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) NL_SET_ERR_MSG(extack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) "Invalid label - must be MPLS_LABEL_FIRST_UNRESERVED or higher");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) is_ok = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) 	/* The full 20-bit label range may not be supported. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) if (is_ok && *index >= net->mpls.platform_labels) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) NL_SET_ERR_MSG(extack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) "Label >= configured maximum in platform_labels");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) is_ok = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) *index = array_index_nospec(*index, net->mpls.platform_labels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) return is_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
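/* Create or replace the route for cfg->rc_label in the platform label
 * table, honouring the NLM_F_CREATE/NLM_F_REPLACE/NLM_F_EXCL semantics
 * of the request.
 */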
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) static int mpls_route_add(struct mpls_route_config *cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) struct mpls_route __rcu **platform_label;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) struct net *net = cfg->rc_nlinfo.nl_net;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) struct mpls_route *rt, *old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) int err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) u8 max_via_alen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) 	unsigned int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) u8 max_labels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) u8 nhs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) index = cfg->rc_label;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) 	/* If a label was not specified during insert, pick one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) if ((index == LABEL_NOT_SPECIFIED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) (cfg->rc_nlflags & NLM_F_CREATE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) index = find_free_label(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) if (!mpls_label_ok(net, &index, extack))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) /* Append makes no sense with mpls */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) err = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) if (cfg->rc_nlflags & NLM_F_APPEND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) NL_SET_ERR_MSG(extack, "MPLS does not support route append");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) err = -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) platform_label = rtnl_dereference(net->mpls.platform_label);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) old = rtnl_dereference(platform_label[index]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if ((cfg->rc_nlflags & NLM_F_EXCL) && old)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) err = -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) if (!(cfg->rc_nlflags & NLM_F_REPLACE) && old)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) err = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) if (!(cfg->rc_nlflags & NLM_F_CREATE) && !old)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) if (cfg->rc_mp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) nhs = mpls_count_nexthops(cfg->rc_mp, cfg->rc_mp_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) cfg->rc_via_alen, &max_via_alen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) &max_labels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) max_via_alen = cfg->rc_via_alen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) max_labels = cfg->rc_output_labels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) nhs = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) if (nhs == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) NL_SET_ERR_MSG(extack, "Route does not contain a nexthop");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) rt = mpls_rt_alloc(nhs, max_via_alen, max_labels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) if (IS_ERR(rt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) err = PTR_ERR(rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) rt->rt_protocol = cfg->rc_protocol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) rt->rt_payload_type = cfg->rc_payload_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) rt->rt_ttl_propagate = cfg->rc_ttl_propagate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) if (cfg->rc_mp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) err = mpls_nh_build_multi(cfg, rt, max_labels, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) err = mpls_nh_build_from_cfg(cfg, rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) goto freert;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) mpls_route_update(net, index, rt, &cfg->rc_nlinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) freert:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) mpls_rt_free(rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) errout:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) static int mpls_route_del(struct mpls_route_config *cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) struct net *net = cfg->rc_nlinfo.nl_net;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	unsigned int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) int err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) index = cfg->rc_label;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) if (!mpls_label_ok(net, &index, extack))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) mpls_route_update(net, index, NULL, &cfg->rc_nlinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) errout:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
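/* Sum the per-CPU counters into @stats, using the u64_stats seqcount to
 * get a consistent snapshot from each CPU.
 */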
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) static void mpls_get_stats(struct mpls_dev *mdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) struct mpls_link_stats *stats)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) struct mpls_pcpu_stats *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) memset(stats, 0, sizeof(*stats));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) for_each_possible_cpu(i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) struct mpls_link_stats local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) unsigned int start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) p = per_cpu_ptr(mdev->stats, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) start = u64_stats_fetch_begin(&p->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) local = p->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) } while (u64_stats_fetch_retry(&p->syncp, start));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) stats->rx_packets += local.rx_packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) stats->rx_bytes += local.rx_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) stats->tx_packets += local.tx_packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) stats->tx_bytes += local.tx_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) stats->rx_errors += local.rx_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) stats->tx_errors += local.tx_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) stats->rx_dropped += local.rx_dropped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) stats->tx_dropped += local.tx_dropped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) stats->rx_noroute += local.rx_noroute;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) static int mpls_fill_stats_af(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) const struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) struct mpls_link_stats *stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) struct mpls_dev *mdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) struct nlattr *nla;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) mdev = mpls_dev_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) if (!mdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) return -ENODATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) nla = nla_reserve_64bit(skb, MPLS_STATS_LINK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) sizeof(struct mpls_link_stats),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) MPLS_STATS_UNSPEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) if (!nla)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) stats = nla_data(nla);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) mpls_get_stats(mdev, stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) static size_t mpls_get_stats_af_size(const struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) struct mpls_dev *mdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) mdev = mpls_dev_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) if (!mdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) return nla_total_size_64bit(sizeof(struct mpls_link_stats));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
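/* Fill one netconf message for @mdev; @type selects which NETCONFA_*
 * attributes are included (or NETCONFA_ALL for all of them).
 */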
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) static int mpls_netconf_fill_devconf(struct sk_buff *skb, struct mpls_dev *mdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) u32 portid, u32 seq, int event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) unsigned int flags, int type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) struct nlmsghdr *nlh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) struct netconfmsg *ncm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) bool all = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) if (!nlh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) if (type == NETCONFA_ALL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) all = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) ncm = nlmsg_data(nlh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) ncm->ncm_family = AF_MPLS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) if (nla_put_s32(skb, NETCONFA_IFINDEX, mdev->dev->ifindex) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) if ((all || type == NETCONFA_INPUT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) nla_put_s32(skb, NETCONFA_INPUT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) mdev->input_enabled) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) nlmsg_end(skb, nlh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) nla_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) nlmsg_cancel(skb, nlh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) static int mpls_netconf_msgsize_devconf(int type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) int size = NLMSG_ALIGN(sizeof(struct netconfmsg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) + nla_total_size(4); /* NETCONFA_IFINDEX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) bool all = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) if (type == NETCONFA_ALL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) all = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) if (all || type == NETCONFA_INPUT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) size += nla_total_size(4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) return size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) static void mpls_netconf_notify_devconf(struct net *net, int event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) int type, struct mpls_dev *mdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) int err = -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) skb = nlmsg_new(mpls_netconf_msgsize_devconf(type), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) err = mpls_netconf_fill_devconf(skb, mdev, 0, 0, event, 0, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) /* -EMSGSIZE implies BUG in mpls_netconf_msgsize_devconf() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) WARN_ON(err == -EMSGSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) rtnl_notify(skb, net, 0, RTNLGRP_MPLS_NETCONF, NULL, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) errout:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) rtnl_set_sk_err(net, RTNLGRP_MPLS_NETCONF, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) static const struct nla_policy devconf_mpls_policy[NETCONFA_MAX + 1] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) [NETCONFA_IFINDEX] = { .len = sizeof(int) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) static int mpls_netconf_valid_get_req(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) const struct nlmsghdr *nlh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) struct nlattr **tb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(struct netconfmsg))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) NL_SET_ERR_MSG_MOD(extack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) "Invalid header for netconf get request");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) if (!netlink_strict_get_check(skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) return nlmsg_parse_deprecated(nlh, sizeof(struct netconfmsg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) tb, NETCONFA_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) devconf_mpls_policy, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct netconfmsg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) tb, NETCONFA_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) devconf_mpls_policy, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) for (i = 0; i <= NETCONFA_MAX; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) if (!tb[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) switch (i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) case NETCONFA_IFINDEX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in netconf get request");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) static int mpls_netconf_get_devconf(struct sk_buff *in_skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) struct nlmsghdr *nlh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) struct net *net = sock_net(in_skb->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) struct nlattr *tb[NETCONFA_MAX + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) struct mpls_dev *mdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) int ifindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) err = mpls_netconf_valid_get_req(in_skb, nlh, tb, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) if (!tb[NETCONFA_IFINDEX])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) dev = __dev_get_by_index(net, ifindex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) mdev = mpls_dev_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) if (!mdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) err = -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) skb = nlmsg_new(mpls_netconf_msgsize_devconf(NETCONFA_ALL), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) err = mpls_netconf_fill_devconf(skb, mdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) NETLINK_CB(in_skb).portid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) nlh->nlmsg_seq, RTM_NEWNETCONF, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) NETCONFA_ALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) /* -EMSGSIZE implies BUG in mpls_netconf_msgsize_devconf() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) WARN_ON(err == -EMSGSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) errout:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) static int mpls_netconf_dump_devconf(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) struct netlink_callback *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) const struct nlmsghdr *nlh = cb->nlh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) struct net *net = sock_net(skb->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) struct hlist_head *head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) struct mpls_dev *mdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) int idx, s_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) int h, s_h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) if (cb->strict_check) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) struct netlink_ext_ack *extack = cb->extack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) struct netconfmsg *ncm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ncm))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) NL_SET_ERR_MSG_MOD(extack, "Invalid header for netconf dump request");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) if (nlmsg_attrlen(nlh, sizeof(*ncm))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) NL_SET_ERR_MSG_MOD(extack, "Invalid data after header in netconf dump request");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) s_h = cb->args[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) s_idx = idx = cb->args[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) head = &net->dev_index_head[h];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) cb->seq = net->dev_base_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) hlist_for_each_entry_rcu(dev, head, index_hlist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) if (idx < s_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) goto cont;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) mdev = mpls_dev_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) if (!mdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) goto cont;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) if (mpls_netconf_fill_devconf(skb, mdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) NETLINK_CB(cb->skb).portid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) nlh->nlmsg_seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) RTM_NEWNETCONF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) NLM_F_MULTI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) NETCONFA_ALL) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) nl_dump_check_consistent(cb, nlmsg_hdr(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) cont:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) cb->args[0] = h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) cb->args[1] = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) return skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360)
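/* The ctl_table .data fields below initially hold the offset of a field
 * within struct mpls_dev; mpls_dev_sysctl_register() rebases them onto
 * the per-device mdev.
 */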
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) #define MPLS_PERDEV_SYSCTL_OFFSET(field) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) (&((struct mpls_dev *)0)->field)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
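/* proc handler for the per-device MPLS sysctls: when the "input" value
 * changes, notify listeners with an RTM_NEWNETCONF message.
 */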
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) static int mpls_conf_proc(struct ctl_table *ctl, int write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) void *buffer, size_t *lenp, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) int oval = *(int *)ctl->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) if (write) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) struct mpls_dev *mdev = ctl->extra1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) int i = (int *)ctl->data - (int *)mdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) struct net *net = ctl->extra2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) int val = *(int *)ctl->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) if (i == offsetof(struct mpls_dev, input_enabled) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) val != oval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) mpls_netconf_notify_devconf(net, RTM_NEWNETCONF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) NETCONFA_INPUT, mdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) static const struct ctl_table mpls_dev_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) .procname = "input",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) .maxlen = sizeof(int),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) .mode = 0644,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) .proc_handler = mpls_conf_proc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) .data = MPLS_PERDEV_SYSCTL_OFFSET(input_enabled),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)
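/* Register the per-device sysctl directory (net/mpls/conf/<dev>) and
 * announce the initial configuration via netconf.
 */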
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) static int mpls_dev_sysctl_register(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) struct mpls_dev *mdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) char path[sizeof("net/mpls/conf/") + IFNAMSIZ];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) struct net *net = dev_net(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) struct ctl_table *table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) table = kmemdup(&mpls_dev_table, sizeof(mpls_dev_table), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) if (!table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) /* Table data contains only offsets relative to the base of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) * the mdev at this point, so make them absolute.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) for (i = 0; i < ARRAY_SIZE(mpls_dev_table); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) table[i].data = (char *)mdev + (uintptr_t)table[i].data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) table[i].extra1 = mdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) table[i].extra2 = net;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) snprintf(path, sizeof(path), "net/mpls/conf/%s", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) mdev->sysctl = register_net_sysctl(net, path, table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) if (!mdev->sysctl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) goto free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) mpls_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_ALL, mdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) kfree(table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) return -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) static void mpls_dev_sysctl_unregister(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) struct mpls_dev *mdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) struct net *net = dev_net(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) struct ctl_table *table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) table = mdev->sysctl->ctl_table_arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) unregister_net_sysctl_table(mdev->sysctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) kfree(table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) mpls_netconf_notify_devconf(net, RTM_DELNETCONF, 0, mdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)
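/* Allocate the per-device MPLS state (per-CPU stats and sysctls) and
 * attach it to the device via dev->mpls_ptr.  Runs under RTNL when a
 * device registers.
 */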
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) static struct mpls_dev *mpls_add_dev(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) struct mpls_dev *mdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) int err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) ASSERT_RTNL();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) if (!mdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) mdev->stats = alloc_percpu(struct mpls_pcpu_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) if (!mdev->stats)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) goto free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) for_each_possible_cpu(i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) struct mpls_pcpu_stats *mpls_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) mpls_stats = per_cpu_ptr(mdev->stats, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) u64_stats_init(&mpls_stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) mdev->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) err = mpls_dev_sysctl_register(dev, mdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) goto free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) rcu_assign_pointer(dev->mpls_ptr, mdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) return mdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) free_percpu(mdev->stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) kfree(mdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) static void mpls_dev_destroy_rcu(struct rcu_head *head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) struct mpls_dev *mdev = container_of(head, struct mpls_dev, rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) free_percpu(mdev->stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) kfree(mdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)
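/* Propagate a device going down or away to the routes that use it:
 * affected nexthops are marked RTNH_F_DEAD/RTNH_F_LINKDOWN and each
 * route's alive count is recomputed.  On NETDEV_UNREGISTER, routes whose
 * nexthops would all be left without a device are removed, and routes
 * that keep other nexthops are modified on a copy that is swapped in via
 * mpls_route_update() rather than edited in place.
 */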
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) static int mpls_ifdown(struct net_device *dev, int event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) struct mpls_route __rcu **platform_label;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) struct net *net = dev_net(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	unsigned int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) platform_label = rtnl_dereference(net->mpls.platform_label);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) for (index = 0; index < net->mpls.platform_labels; index++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) struct mpls_route *rt = rtnl_dereference(platform_label[index]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) bool nh_del = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) u8 alive = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) if (!rt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) if (event == NETDEV_UNREGISTER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) u8 deleted = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) for_nexthops(rt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) struct net_device *nh_dev =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) rtnl_dereference(nh->nh_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) if (!nh_dev || nh_dev == dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) deleted++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) if (nh_dev == dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) nh_del = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) } endfor_nexthops(rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) /* if there are no more nexthops, delete the route */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) if (deleted == rt->rt_nhn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) mpls_route_update(net, index, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) if (nh_del) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) size_t size = sizeof(*rt) + rt->rt_nhn *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) rt->rt_nh_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) struct mpls_route *orig = rt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) rt = kmalloc(size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) if (!rt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) memcpy(rt, orig, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) change_nexthops(rt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) unsigned int nh_flags = nh->nh_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) if (rtnl_dereference(nh->nh_dev) != dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) switch (event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) case NETDEV_DOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) case NETDEV_UNREGISTER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) nh_flags |= RTNH_F_DEAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) case NETDEV_CHANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) nh_flags |= RTNH_F_LINKDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) if (event == NETDEV_UNREGISTER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) RCU_INIT_POINTER(nh->nh_dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) if (nh->nh_flags != nh_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) WRITE_ONCE(nh->nh_flags, nh_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) next:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) if (!(nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) alive++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) } endfor_nexthops(rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) WRITE_ONCE(rt->rt_nhn_alive, alive);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) if (nh_del)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) mpls_route_update(net, index, rt, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572)
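/* Clear the given RTNH_F_* flags on nexthops that use @dev and recompute
 * each route's count of alive nexthops.
 */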
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) static void mpls_ifup(struct net_device *dev, unsigned int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) struct mpls_route __rcu **platform_label;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) struct net *net = dev_net(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 	unsigned int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) u8 alive;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) platform_label = rtnl_dereference(net->mpls.platform_label);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) for (index = 0; index < net->mpls.platform_labels; index++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) struct mpls_route *rt = rtnl_dereference(platform_label[index]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) if (!rt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) alive = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) change_nexthops(rt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) unsigned int nh_flags = nh->nh_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) struct net_device *nh_dev =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) rtnl_dereference(nh->nh_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) if (!(nh_flags & flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) alive++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) if (nh_dev != dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) alive++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) nh_flags &= ~flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) WRITE_ONCE(nh->nh_flags, nh_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) } endfor_nexthops(rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) WRITE_ONCE(rt->rt_nhn_alive, alive);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607)
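/* netdevice notifier: create and destroy the per-device MPLS state and
 * keep nexthop RTNH_F_DEAD/RTNH_F_LINKDOWN flags in sync with the
 * device's state.
 */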
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) void *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) struct net_device *dev = netdev_notifier_info_to_dev(ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) struct mpls_dev *mdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) unsigned int flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) if (event == NETDEV_REGISTER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) mdev = mpls_add_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) if (IS_ERR(mdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) return notifier_from_errno(PTR_ERR(mdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) return NOTIFY_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) mdev = mpls_dev_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) if (!mdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) return NOTIFY_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) switch (event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) case NETDEV_DOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) err = mpls_ifdown(dev, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) return notifier_from_errno(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) case NETDEV_UP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) flags = dev_get_flags(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) if (flags & (IFF_RUNNING | IFF_LOWER_UP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) mpls_ifup(dev, RTNH_F_DEAD | RTNH_F_LINKDOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) mpls_ifup(dev, RTNH_F_DEAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) case NETDEV_CHANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) flags = dev_get_flags(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) if (flags & (IFF_RUNNING | IFF_LOWER_UP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) mpls_ifup(dev, RTNH_F_DEAD | RTNH_F_LINKDOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) err = mpls_ifdown(dev, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) return notifier_from_errno(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) case NETDEV_UNREGISTER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) err = mpls_ifdown(dev, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) return notifier_from_errno(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) mdev = mpls_dev_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) if (mdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) mpls_dev_sysctl_unregister(dev, mdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) RCU_INIT_POINTER(dev->mpls_ptr, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) call_rcu(&mdev->rcu, mpls_dev_destroy_rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) case NETDEV_CHANGENAME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) mdev = mpls_dev_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) if (mdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) mpls_dev_sysctl_unregister(dev, mdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) err = mpls_dev_sysctl_register(dev, mdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) return notifier_from_errno(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) return NOTIFY_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) static struct notifier_block mpls_dev_notifier = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) .notifier_call = mpls_dev_notify,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679)
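/* Emit an RTA_VIA attribute for a nexthop address, translating the
 * internal neighbour table index into an address family.
 */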
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) static int nla_put_via(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) u8 table, const void *addr, int alen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) static const int table_to_family[NEIGH_NR_TABLES + 1] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) AF_INET, AF_INET6, AF_DECnet, AF_PACKET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) struct nlattr *nla;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) struct rtvia *via;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) int family = AF_UNSPEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) nla = nla_reserve(skb, RTA_VIA, alen + 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) if (!nla)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) if (table <= NEIGH_NR_TABLES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) family = table_to_family[table];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) via = nla_data(nla);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) via->rtvia_family = family;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) memcpy(via->rtvia_addr, addr, alen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702)
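/* Encode a label stack as an attribute of 4-byte MPLS shim entries. TTL and
 * TC are left at zero and the bottom-of-stack bit is set only on the last
 * entry.
 */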
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) int nla_put_labels(struct sk_buff *skb, int attrtype,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) u8 labels, const u32 label[])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) struct nlattr *nla;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) struct mpls_shim_hdr *nla_label;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) bool bos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 	nla = nla_reserve(skb, attrtype, labels * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) if (!nla)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) nla_label = nla_data(nla);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) bos = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) for (i = labels - 1; i >= 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) nla_label[i] = mpls_entry_encode(label[i], 0, 0, bos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) bos = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) EXPORT_SYMBOL_GPL(nla_put_labels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724)
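/* Decode and validate a label-stack attribute. The attribute length must be
 * a multiple of 4 and hold no more than max_labels entries; every entry must
 * have zero TTL and TC, the bottom-of-stack bit may only be set on the last
 * entry, and the implicit NULL label is rejected. When label is NULL only
 * the number of labels is reported via *labels.
 */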
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) int nla_get_labels(const struct nlattr *nla, u8 max_labels, u8 *labels,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) u32 label[], struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 	unsigned int len = nla_len(nla);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) struct mpls_shim_hdr *nla_label;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) u8 nla_labels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) bool bos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 	/* len needs to be a multiple of 4 (the label size). The number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 	 * of labels is returned in a u8, so check for overflow.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) if (len & 3 || len / 4 > 255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) NL_SET_ERR_MSG_ATTR(extack, nla,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) "Invalid length for labels attribute");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) /* Limit the number of new labels allowed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 	nla_labels = len / 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) if (nla_labels > max_labels) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) NL_SET_ERR_MSG(extack, "Too many labels");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) /* when label == NULL, caller wants number of labels */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) if (!label)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) nla_label = nla_data(nla);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) bos = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) for (i = nla_labels - 1; i >= 0; i--, bos = false) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) struct mpls_entry_decoded dec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) dec = mpls_entry_decode(nla_label + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) /* Ensure the bottom of stack flag is properly set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) * and ttl and tc are both clear.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) if (dec.ttl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) NL_SET_ERR_MSG_ATTR(extack, nla,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) "TTL in label must be 0");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) if (dec.tc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) NL_SET_ERR_MSG_ATTR(extack, nla,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) "Traffic class in label must be 0");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) if (dec.bos != bos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) NL_SET_BAD_ATTR(extack, nla);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) if (bos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) NL_SET_ERR_MSG(extack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) "BOS bit must be set in first label");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) NL_SET_ERR_MSG(extack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) "BOS bit can only be set in first label");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) switch (dec.label) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) case MPLS_LABEL_IMPLNULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) /* RFC3032: This is a label that an LSR may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) * assign and distribute, but which never
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) * actually appears in the encapsulation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) NL_SET_ERR_MSG_ATTR(extack, nla,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 					    "Implicit NULL Label (3) cannot be used in encapsulation");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) label[i] = dec.label;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) *labels = nla_labels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) EXPORT_SYMBOL_GPL(nla_get_labels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805)
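/* Translate an RTM_NEWROUTE/RTM_DELROUTE request into an mpls_route_config.
 * The rtmsg header is constrained to what MPLS supports (AF_MPLS, 20-bit
 * destination, main table, universe scope, unicast, no flags) and the
 * RTA_OIF, RTA_NEWDST, RTA_DST, RTA_VIA, RTA_MULTIPATH and
 * RTA_TTL_PROPAGATE attributes are copied into the config.
 */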
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) static int rtm_to_route_config(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) struct nlmsghdr *nlh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) struct mpls_route_config *cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) struct rtmsg *rtm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) struct nlattr *tb[RTA_MAX+1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) err = nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) rtm_mpls_policy, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) rtm = nlmsg_data(nlh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) if (rtm->rtm_family != AF_MPLS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) NL_SET_ERR_MSG(extack, "Invalid address family in rtmsg");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) if (rtm->rtm_dst_len != 20) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) NL_SET_ERR_MSG(extack, "rtm_dst_len must be 20 for MPLS");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) if (rtm->rtm_src_len != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) NL_SET_ERR_MSG(extack, "rtm_src_len must be 0 for MPLS");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) if (rtm->rtm_tos != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) NL_SET_ERR_MSG(extack, "rtm_tos must be 0 for MPLS");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) if (rtm->rtm_table != RT_TABLE_MAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) NL_SET_ERR_MSG(extack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) "MPLS only supports the main route table");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) /* Any value is acceptable for rtm_protocol */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 	/* As MPLS uses destination-specific addresses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 	 * (or source-specific addresses in the case of multicast),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 	 * all addresses have universal scope.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) if (rtm->rtm_scope != RT_SCOPE_UNIVERSE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) NL_SET_ERR_MSG(extack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) "Invalid route scope - MPLS only supports UNIVERSE");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) if (rtm->rtm_type != RTN_UNICAST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) NL_SET_ERR_MSG(extack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) "Invalid route type - MPLS only supports UNICAST");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) if (rtm->rtm_flags != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) NL_SET_ERR_MSG(extack, "rtm_flags must be 0 for MPLS");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) cfg->rc_label = LABEL_NOT_SPECIFIED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) cfg->rc_protocol = rtm->rtm_protocol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) cfg->rc_via_table = MPLS_NEIGH_TABLE_UNSPEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) cfg->rc_ttl_propagate = MPLS_TTL_PROP_DEFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) cfg->rc_nlflags = nlh->nlmsg_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) cfg->rc_nlinfo.portid = NETLINK_CB(skb).portid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) cfg->rc_nlinfo.nlh = nlh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) cfg->rc_nlinfo.nl_net = sock_net(skb->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) for (index = 0; index <= RTA_MAX; index++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) struct nlattr *nla = tb[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) if (!nla)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) switch (index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) case RTA_OIF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) cfg->rc_ifindex = nla_get_u32(nla);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) case RTA_NEWDST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) if (nla_get_labels(nla, MAX_NEW_LABELS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) &cfg->rc_output_labels,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) cfg->rc_output_label, extack))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) case RTA_DST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) u8 label_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) if (nla_get_labels(nla, 1, &label_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) &cfg->rc_label, extack))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) if (!mpls_label_ok(cfg->rc_nlinfo.nl_net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) &cfg->rc_label, extack))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) case RTA_GATEWAY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) NL_SET_ERR_MSG(extack, "MPLS does not support RTA_GATEWAY attribute");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) case RTA_VIA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) if (nla_get_via(nla, &cfg->rc_via_alen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) &cfg->rc_via_table, cfg->rc_via,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) extack))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) case RTA_MULTIPATH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) cfg->rc_mp = nla_data(nla);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) cfg->rc_mp_len = nla_len(nla);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) case RTA_TTL_PROPAGATE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) u8 ttl_propagate = nla_get_u8(nla);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) if (ttl_propagate > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) NL_SET_ERR_MSG_ATTR(extack, nla,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) "RTA_TTL_PROPAGATE can only be 0 or 1");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) cfg->rc_ttl_propagate = ttl_propagate ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) MPLS_TTL_PROP_ENABLED :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) MPLS_TTL_PROP_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) NL_SET_ERR_MSG_ATTR(extack, nla, "Unknown attribute");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) /* Unsupported attribute */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) errout:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) static int mpls_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) struct mpls_route_config *cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) if (!cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) err = rtm_to_route_config(skb, nlh, cfg, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) err = mpls_route_del(cfg, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) kfree(cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) static int mpls_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) struct mpls_route_config *cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) if (!cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) err = rtm_to_route_config(skb, nlh, cfg, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) err = mpls_route_add(cfg, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) kfree(cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987)
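/* Fill one RTM_NEWROUTE message for the route bound to a label. A
 * single-nexthop route is described with RTA_NEWDST/RTA_VIA/RTA_OIF
 * directly; a multipath route nests one rtnexthop per nexthop inside
 * RTA_MULTIPATH. DEAD/LINKDOWN are reflected in rtm_flags when they apply
 * to every nexthop.
 */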
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) u32 label, struct mpls_route *rt, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) struct nlmsghdr *nlh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) struct rtmsg *rtm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) if (nlh == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) rtm = nlmsg_data(nlh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) rtm->rtm_family = AF_MPLS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) rtm->rtm_dst_len = 20;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) rtm->rtm_src_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) rtm->rtm_tos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) rtm->rtm_table = RT_TABLE_MAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) rtm->rtm_protocol = rt->rt_protocol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) rtm->rtm_scope = RT_SCOPE_UNIVERSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) rtm->rtm_type = RTN_UNICAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) rtm->rtm_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) if (nla_put_labels(skb, RTA_DST, 1, &label))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) if (rt->rt_ttl_propagate != MPLS_TTL_PROP_DEFAULT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) bool ttl_propagate =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) rt->rt_ttl_propagate == MPLS_TTL_PROP_ENABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) if (nla_put_u8(skb, RTA_TTL_PROPAGATE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) ttl_propagate))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) if (rt->rt_nhn == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) const struct mpls_nh *nh = rt->rt_nh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) if (nh->nh_labels &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) nla_put_labels(skb, RTA_NEWDST, nh->nh_labels,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) nh->nh_label))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) nh->nh_via_alen))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) dev = rtnl_dereference(nh->nh_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) if (nh->nh_flags & RTNH_F_LINKDOWN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) rtm->rtm_flags |= RTNH_F_LINKDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) if (nh->nh_flags & RTNH_F_DEAD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) rtm->rtm_flags |= RTNH_F_DEAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) struct rtnexthop *rtnh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) struct nlattr *mp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) u8 linkdown = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) u8 dead = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) if (!mp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) for_nexthops(rt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) dev = rtnl_dereference(nh->nh_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) if (!rtnh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) rtnh->rtnh_ifindex = dev->ifindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) if (nh->nh_flags & RTNH_F_LINKDOWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) rtnh->rtnh_flags |= RTNH_F_LINKDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) linkdown++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) if (nh->nh_flags & RTNH_F_DEAD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) rtnh->rtnh_flags |= RTNH_F_DEAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) dead++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) if (nh->nh_labels && nla_put_labels(skb, RTA_NEWDST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) nh->nh_labels,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) nh->nh_label))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) nla_put_via(skb, nh->nh_via_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) mpls_nh_via(rt, nh),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) nh->nh_via_alen))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) /* length of rtnetlink header + attributes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) } endfor_nexthops(rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) if (linkdown == rt->rt_nhn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) rtm->rtm_flags |= RTNH_F_LINKDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) if (dead == rt->rt_nhn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) rtm->rtm_flags |= RTNH_F_DEAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) nla_nest_end(skb, mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) nlmsg_end(skb, nlh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) nla_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) nlmsg_cancel(skb, nlh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097)
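/* Strict validation of FIB dump requests. With CONFIG_INET the generic IPv4
 * helper is reused; otherwise a local copy checks the rtmsg header and
 * accepts only an RTA_OIF filter.
 */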
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) #if IS_ENABLED(CONFIG_INET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) static int mpls_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) struct fib_dump_filter *filter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) struct netlink_callback *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) return ip_valid_fib_dump_req(net, nlh, filter, cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) static int mpls_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) struct fib_dump_filter *filter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) struct netlink_callback *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) struct netlink_ext_ack *extack = cb->extack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) struct nlattr *tb[RTA_MAX + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) struct rtmsg *rtm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) int err, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) NL_SET_ERR_MSG_MOD(extack, "Invalid header for FIB dump request");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) rtm = nlmsg_data(nlh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) if (rtm->rtm_dst_len || rtm->rtm_src_len || rtm->rtm_tos ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) rtm->rtm_table || rtm->rtm_scope || rtm->rtm_type ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) rtm->rtm_flags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for FIB dump request");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) if (rtm->rtm_protocol) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) filter->protocol = rtm->rtm_protocol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) filter->filter_set = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) cb->answer_flags = NLM_F_DUMP_FILTERED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) rtm_mpls_policy, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) for (i = 0; i <= RTA_MAX; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) int ifindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 		if (i == RTA_OIF && tb[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) ifindex = nla_get_u32(tb[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) filter->dev = __dev_get_by_index(net, ifindex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) if (!filter->dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) filter->filter_set = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) } else if (tb[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in dump request");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157)
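/* Return true if any nexthop of the route is bound to the given device. */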
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) static bool mpls_rt_uses_dev(struct mpls_route *rt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) const struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) struct net_device *nh_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) if (rt->rt_nhn == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) struct mpls_nh *nh = rt->rt_nh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) nh_dev = rtnl_dereference(nh->nh_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) if (dev == nh_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) for_nexthops(rt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) nh_dev = rtnl_dereference(nh->nh_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) if (nh_dev == dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) } endfor_nexthops(rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179)
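/* Dump the platform label table. The walk starts at the label saved in
 * cb->args[0] (never below the first unreserved label), skips routes that
 * do not match an optional device or protocol filter, and stores the next
 * label to visit back in cb->args[0] for resumption.
 */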
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) static int mpls_dump_routes(struct sk_buff *skb, struct netlink_callback *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) const struct nlmsghdr *nlh = cb->nlh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) struct net *net = sock_net(skb->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) struct mpls_route __rcu **platform_label;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) struct fib_dump_filter filter = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) unsigned int flags = NLM_F_MULTI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) size_t platform_labels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) unsigned int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) ASSERT_RTNL();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) if (cb->strict_check) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) err = mpls_valid_fib_dump_req(net, nlh, &filter, cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 		/* For MPLS there is only one table, with a fixed type and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 		 * flags. If any of these are set in the filter, return nothing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) if ((filter.table_id && filter.table_id != RT_TABLE_MAIN) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) (filter.rt_type && filter.rt_type != RTN_UNICAST) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) filter.flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) return skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) index = cb->args[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) if (index < MPLS_LABEL_FIRST_UNRESERVED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) index = MPLS_LABEL_FIRST_UNRESERVED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) platform_label = rtnl_dereference(net->mpls.platform_label);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) platform_labels = net->mpls.platform_labels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) if (filter.filter_set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) flags |= NLM_F_DUMP_FILTERED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) for (; index < platform_labels; index++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) struct mpls_route *rt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) rt = rtnl_dereference(platform_label[index]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) if (!rt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) if ((filter.dev && !mpls_rt_uses_dev(rt, filter.dev)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) (filter.protocol && rt->rt_protocol != filter.protocol))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) if (mpls_dump_route(skb, NETLINK_CB(cb->skb).portid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) cb->nlh->nlmsg_seq, RTM_NEWROUTE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) index, rt, flags) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) cb->args[0] = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) return skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238)
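/* Upper bound on the netlink message size needed by mpls_dump_route() for
 * this route; used to size notification skbs.
 */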
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) static inline size_t lfib_nlmsg_size(struct mpls_route *rt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) size_t payload =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) NLMSG_ALIGN(sizeof(struct rtmsg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) + nla_total_size(4) /* RTA_DST */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) + nla_total_size(1); /* RTA_TTL_PROPAGATE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) if (rt->rt_nhn == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) struct mpls_nh *nh = rt->rt_nh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) if (nh->nh_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) payload += nla_total_size(4); /* RTA_OIF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC) /* RTA_VIA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) payload += nla_total_size(2 + nh->nh_via_alen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) if (nh->nh_labels) /* RTA_NEWDST */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) payload += nla_total_size(nh->nh_labels * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) /* each nexthop is packed in an attribute */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) size_t nhsize = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) for_nexthops(rt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) if (!rtnl_dereference(nh->nh_dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) nhsize += nla_total_size(sizeof(struct rtnexthop));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) /* RTA_VIA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) nhsize += nla_total_size(2 + nh->nh_via_alen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) if (nh->nh_labels)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) nhsize += nla_total_size(nh->nh_labels * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) } endfor_nexthops(rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) /* nested attribute */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) payload += nla_total_size(nhsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) return payload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275)
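/* Notify RTNLGRP_MPLS_ROUTE listeners about a route change. The skb is
 * sized with lfib_nlmsg_size() and filled by mpls_dump_route(); on failure
 * the error is reported to the group via rtnl_set_sk_err().
 */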
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) static void rtmsg_lfib(int event, u32 label, struct mpls_route *rt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) struct nlmsghdr *nlh, struct net *net, u32 portid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) unsigned int nlm_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) u32 seq = nlh ? nlh->nlmsg_seq : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) int err = -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) skb = nlmsg_new(lfib_nlmsg_size(rt), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) if (skb == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) err = mpls_dump_route(skb, portid, seq, event, label, rt, nlm_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) /* -EMSGSIZE implies BUG in lfib_nlmsg_size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) WARN_ON(err == -EMSGSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) rtnl_notify(skb, net, portid, RTNLGRP_MPLS_ROUTE, nlh, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) errout:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) rtnl_set_sk_err(net, RTNLGRP_MPLS_ROUTE, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302)
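/* Validate an RTM_GETROUTE request. Sockets that did not opt in to strict
 * checking only get the lenient attribute parse; strict ones also have the
 * rtmsg header constrained and may supply nothing beyond RTA_DST and
 * RTA_NEWDST.
 */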
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) static int mpls_valid_getroute_req(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) const struct nlmsghdr *nlh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) struct nlattr **tb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) struct rtmsg *rtm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) NL_SET_ERR_MSG_MOD(extack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) "Invalid header for get route request");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) if (!netlink_strict_get_check(skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) rtm_mpls_policy, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) rtm = nlmsg_data(nlh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) if ((rtm->rtm_dst_len && rtm->rtm_dst_len != 20) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) rtm->rtm_src_len || rtm->rtm_tos || rtm->rtm_table ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) rtm->rtm_protocol || rtm->rtm_scope || rtm->rtm_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get route request");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) if (rtm->rtm_flags & ~RTM_F_FIB_MATCH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) NL_SET_ERR_MSG_MOD(extack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) "Invalid flags for get route request");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) rtm_mpls_policy, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) if ((tb[RTA_DST] || tb[RTA_NEWDST]) && !rtm->rtm_dst_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) NL_SET_ERR_MSG_MOD(extack, "rtm_dst_len must be 20 for MPLS");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) for (i = 0; i <= RTA_MAX; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) if (!tb[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) switch (i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) case RTA_DST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) case RTA_NEWDST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in get route request");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360)
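/* RTM_GETROUTE handler. The route for RTA_DST is looked up; with
 * RTM_F_FIB_MATCH the matched route entry itself is returned. Otherwise a
 * dummy MPLS packet is built (carrying any RTA_NEWDST labels) so that
 * multipath selection picks the same nexthop the data path would use, and
 * that single nexthop is reported.
 */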
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) static int mpls_getroute(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) struct net *net = sock_net(in_skb->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) u32 portid = NETLINK_CB(in_skb).portid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) u32 in_label = LABEL_NOT_SPECIFIED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) struct nlattr *tb[RTA_MAX + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) u32 labels[MAX_NEW_LABELS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) struct mpls_shim_hdr *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) unsigned int hdr_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) struct mpls_route *rt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) struct rtmsg *rtm, *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) struct nlmsghdr *nlh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) struct mpls_nh *nh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) u8 n_labels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) err = mpls_valid_getroute_req(in_skb, in_nlh, tb, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) rtm = nlmsg_data(in_nlh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) if (tb[RTA_DST]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) u8 label_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) if (nla_get_labels(tb[RTA_DST], 1, &label_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) &in_label, extack)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) if (!mpls_label_ok(net, &in_label, extack)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) rt = mpls_route_input_rcu(net, in_label);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) if (!rt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) err = -ENETUNREACH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) if (rtm->rtm_flags & RTM_F_FIB_MATCH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) skb = nlmsg_new(lfib_nlmsg_size(rt), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) err = -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) err = mpls_dump_route(skb, portid, in_nlh->nlmsg_seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) RTM_NEWROUTE, in_label, rt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) /* -EMSGSIZE implies BUG in lfib_nlmsg_size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) WARN_ON(err == -EMSGSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) goto errout_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) return rtnl_unicast(skb, net, portid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) if (tb[RTA_NEWDST]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) if (nla_get_labels(tb[RTA_NEWDST], MAX_NEW_LABELS, &n_labels,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) labels, extack) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) hdr_size = n_labels * sizeof(struct mpls_shim_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) err = -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) skb->protocol = htons(ETH_P_MPLS_UC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) if (hdr_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) bool bos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) if (skb_cow(skb, hdr_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) err = -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) goto errout_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) skb_reserve(skb, hdr_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) skb_push(skb, hdr_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) skb_reset_network_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) /* Push new labels */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) hdr = mpls_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) bos = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) for (i = n_labels - 1; i >= 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) hdr[i] = mpls_entry_encode(labels[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 1, 0, bos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) bos = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) nh = mpls_select_multipath(rt, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) if (!nh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) err = -ENETUNREACH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) goto errout_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) if (hdr_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) skb_pull(skb, hdr_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) skb_reset_network_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) nlh = nlmsg_put(skb, portid, in_nlh->nlmsg_seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) RTM_NEWROUTE, sizeof(*r), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) if (!nlh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) err = -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) goto errout_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) r = nlmsg_data(nlh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) r->rtm_family = AF_MPLS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) r->rtm_dst_len = 20;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) r->rtm_src_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) r->rtm_table = RT_TABLE_MAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) r->rtm_type = RTN_UNICAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) r->rtm_scope = RT_SCOPE_UNIVERSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) r->rtm_protocol = rt->rt_protocol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) r->rtm_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) if (nla_put_labels(skb, RTA_DST, 1, &in_label))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) if (nh->nh_labels &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) nla_put_labels(skb, RTA_NEWDST, nh->nh_labels,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) nh->nh_label))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) nh->nh_via_alen))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) dev = rtnl_dereference(nh->nh_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) nlmsg_end(skb, nlh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) err = rtnl_unicast(skb, net, portid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) errout:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) nla_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) nlmsg_cancel(skb, nlh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) err = -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) errout_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523)
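/* Grow or shrink the per-netns platform label table to "limit" entries.
 * Routes for the reserved IPv4/IPv6 explicit-null labels are
 * pre-allocated when the new limit covers them, labels beyond the new
 * limit are flushed, the new table is swapped in under RTNL and the
 * old one is freed after an RCU grace period.
 */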
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) static int resize_platform_label_table(struct net *net, size_t limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) size_t size = sizeof(struct mpls_route *) * limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) size_t old_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) size_t cp_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) struct mpls_route __rcu **labels = NULL, **old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) struct mpls_route *rt0 = NULL, *rt2 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 	unsigned int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) if (size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) labels = kvzalloc(size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) if (!labels)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) goto nolabels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) /* In case the predefined labels need to be populated */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) if (limit > MPLS_LABEL_IPV4NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) struct net_device *lo = net->loopback_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) rt0 = mpls_rt_alloc(1, lo->addr_len, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) if (IS_ERR(rt0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) goto nort0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) RCU_INIT_POINTER(rt0->rt_nh->nh_dev, lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) rt0->rt_protocol = RTPROT_KERNEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) rt0->rt_payload_type = MPT_IPV4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) rt0->rt_ttl_propagate = MPLS_TTL_PROP_DEFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) rt0->rt_nh->nh_via_table = NEIGH_LINK_TABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) rt0->rt_nh->nh_via_alen = lo->addr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) memcpy(__mpls_nh_via(rt0, rt0->rt_nh), lo->dev_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) lo->addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) if (limit > MPLS_LABEL_IPV6NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) struct net_device *lo = net->loopback_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) rt2 = mpls_rt_alloc(1, lo->addr_len, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) if (IS_ERR(rt2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) goto nort2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) RCU_INIT_POINTER(rt2->rt_nh->nh_dev, lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) rt2->rt_protocol = RTPROT_KERNEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) rt2->rt_payload_type = MPT_IPV6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) rt2->rt_ttl_propagate = MPLS_TTL_PROP_DEFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) rt2->rt_nh->nh_via_table = NEIGH_LINK_TABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) rt2->rt_nh->nh_via_alen = lo->addr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) memcpy(__mpls_nh_via(rt2, rt2->rt_nh), lo->dev_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) lo->addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) /* Remember the original table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) old = rtnl_dereference(net->mpls.platform_label);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) old_limit = net->mpls.platform_labels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) /* Free any labels beyond the new table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) for (index = limit; index < old_limit; index++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) mpls_route_update(net, index, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) /* Copy over the old labels */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) cp_size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) if (old_limit < limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) cp_size = old_limit * sizeof(struct mpls_route *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) memcpy(labels, old, cp_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) 	/* If needed, set the predefined labels */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) if ((old_limit <= MPLS_LABEL_IPV6NULL) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) (limit > MPLS_LABEL_IPV6NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) RCU_INIT_POINTER(labels[MPLS_LABEL_IPV6NULL], rt2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) rt2 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) if ((old_limit <= MPLS_LABEL_IPV4NULL) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) (limit > MPLS_LABEL_IPV4NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) RCU_INIT_POINTER(labels[MPLS_LABEL_IPV4NULL], rt0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) rt0 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) /* Update the global pointers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) net->mpls.platform_labels = limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) rcu_assign_pointer(net->mpls.platform_label, labels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) mpls_rt_free(rt2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) mpls_rt_free(rt0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) if (old) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) synchronize_rcu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) kvfree(old);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) nort2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) mpls_rt_free(rt0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) nort0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) kvfree(labels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) nolabels:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620)
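/* proc handler for the net.mpls.platform_labels sysctl.  The value is
 * range-checked against [0, label_limit] through a temporary ctl_table
 * and a successful write resizes the platform label table, e.g.
 * (illustrative command, not taken from this file):
 *
 *	sysctl -w net.mpls.platform_labels=1024
 */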
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) static int mpls_platform_labels(struct ctl_table *table, int write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) void *buffer, size_t *lenp, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) struct net *net = table->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) int platform_labels = net->mpls.platform_labels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) struct ctl_table tmp = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) .procname = table->procname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) .data = &platform_labels,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) .maxlen = sizeof(int),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) .mode = table->mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) .extra1 = SYSCTL_ZERO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) .extra2 = &label_limit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) if (write && ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) ret = resize_platform_label_table(net, platform_labels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643)
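/* Store the offset of a per-netns field as the sysctl ->data value;
 * mpls_net_init() rebases these offsets onto the actual struct net.
 */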
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) #define MPLS_NS_SYSCTL_OFFSET(field) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) (&((struct net *)0)->field)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) static const struct ctl_table mpls_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) .procname = "platform_labels",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) .data = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) .maxlen = sizeof(int),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) .mode = 0644,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) .proc_handler = mpls_platform_labels,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) .procname = "ip_ttl_propagate",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) .data = MPLS_NS_SYSCTL_OFFSET(mpls.ip_ttl_propagate),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) .maxlen = sizeof(int),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) .mode = 0644,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) .proc_handler = proc_dointvec_minmax,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) .extra1 = SYSCTL_ZERO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) .extra2 = SYSCTL_ONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) .procname = "default_ttl",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) .data = MPLS_NS_SYSCTL_OFFSET(mpls.default_ttl),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) .maxlen = sizeof(int),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) .mode = 0644,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) .proc_handler = proc_dointvec_minmax,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) .extra1 = SYSCTL_ONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) .extra2 = &ttl_max,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675)
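/* Per-netns setup: start with an empty platform label table and default
 * TTL handling, then register a private copy of mpls_table (with the
 * offsets above rebased onto this struct net) under "net/mpls".
 */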
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) static int mpls_net_init(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) struct ctl_table *table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) net->mpls.platform_labels = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) net->mpls.platform_label = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) net->mpls.ip_ttl_propagate = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) net->mpls.default_ttl = 255;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) table = kmemdup(mpls_table, sizeof(mpls_table), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) if (table == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) 	/* Table data contains only offsets relative to the base of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) 	 * the struct net at this point, so make them absolute.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) for (i = 0; i < ARRAY_SIZE(mpls_table) - 1; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) table[i].data = (char *)net + (uintptr_t)table[i].data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) net->mpls.ctl = register_net_sysctl(net, "net/mpls", table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) if (net->mpls.ctl == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) kfree(table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704)
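/* Per-netns teardown: drop the sysctl table, flush every route still
 * present in the platform label table under RTNL and free the table.
 */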
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) static void mpls_net_exit(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) struct mpls_route __rcu **platform_label;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) size_t platform_labels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) struct ctl_table *table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) unsigned int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) table = net->mpls.ctl->ctl_table_arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) unregister_net_sysctl_table(net->mpls.ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) kfree(table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) 	/* An rcu grace period has passed since the last device in this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 	 * network namespace (and thus the last in-flight packet) was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 	 * removed from it.  This is because unregister_netdevice_many
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) 	 * and netdev_run_todo have completed for each network device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) 	 * that was in this network namespace.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) 	 * As such no additional rcu synchronization is necessary when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) 	 * freeing the platform_label table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) platform_label = rtnl_dereference(net->mpls.platform_label);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) platform_labels = net->mpls.platform_labels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) for (index = 0; index < platform_labels; index++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) struct mpls_route *rt = rtnl_dereference(platform_label[index]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) RCU_INIT_POINTER(platform_label[index], NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) mpls_notify_route(net, index, rt, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) mpls_rt_free(rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) kvfree(platform_label);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) static struct pernet_operations mpls_net_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) .init = mpls_net_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) .exit = mpls_net_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) static struct rtnl_af_ops mpls_af_ops __read_mostly = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) .family = AF_MPLS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) .fill_stats_af = mpls_fill_stats_af,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) .get_stats_af_size = mpls_get_stats_af_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749)
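/* Module init: register the pernet subsystem and the netdevice notifier
 * (both may fail), then the MPLS packet handler, the AF_MPLS rtnl_af
 * ops, the rtnetlink message handlers and the MPLS-over-GRE encap ops.
 */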
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) static int __init mpls_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) BUILD_BUG_ON(sizeof(struct mpls_shim_hdr) != 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) err = register_pernet_subsys(&mpls_net_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) err = register_netdevice_notifier(&mpls_dev_notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) goto out_unregister_pernet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) dev_add_pack(&mpls_packet_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) rtnl_af_register(&mpls_af_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) rtnl_register_module(THIS_MODULE, PF_MPLS, RTM_NEWROUTE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) mpls_rtm_newroute, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) rtnl_register_module(THIS_MODULE, PF_MPLS, RTM_DELROUTE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) mpls_rtm_delroute, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) rtnl_register_module(THIS_MODULE, PF_MPLS, RTM_GETROUTE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) mpls_getroute, mpls_dump_routes, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) rtnl_register_module(THIS_MODULE, PF_MPLS, RTM_GETNETCONF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) mpls_netconf_get_devconf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) mpls_netconf_dump_devconf, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) err = ipgre_tunnel_encap_add_mpls_ops();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) 		pr_err("Can't add MPLS over GRE tunnel ops\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) out_unregister_pernet:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) unregister_pernet_subsys(&mpls_net_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) module_init(mpls_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) static void __exit mpls_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) rtnl_unregister_all(PF_MPLS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) rtnl_af_unregister(&mpls_af_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) dev_remove_pack(&mpls_packet_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) unregister_netdevice_notifier(&mpls_dev_notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) unregister_pernet_subsys(&mpls_net_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) ipgre_tunnel_encap_del_mpls_ops();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) module_exit(mpls_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) MODULE_DESCRIPTION("Multiprotocol Label Switching");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) MODULE_LICENSE("GPL v2");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) MODULE_ALIAS_NETPROTO(PF_MPLS);