^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * xfrm_policy.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Changes:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Mitsuru KANDA @USAGI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Kazunori MIYAZAWA @USAGI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * IPv6 support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * Kazunori MIYAZAWA @USAGI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * YOSHIFUJI Hideaki
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) * Split up af-specific portion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) * Derek Atkins <derek@ihtfp.com> Add the post_input processor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/err.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/kmod.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/list.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/workqueue.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/notifier.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/netdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/netfilter.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <linux/cache.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <linux/cpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <linux/audit.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <linux/rhashtable.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <linux/if_tunnel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include <net/dst.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include <net/flow.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #ifndef __GENKSYMS__
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #include <net/inet_ecn.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #include <net/xfrm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #include <net/ip.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #ifndef __GENKSYMS__
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #include <net/gre.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #if IS_ENABLED(CONFIG_IPV6_MIP6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #include <net/mip6.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #ifdef CONFIG_XFRM_STATISTICS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) #include <net/snmp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #ifdef CONFIG_XFRM_ESPINTCP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) #include <net/espintcp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) #include "xfrm_hash.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) #define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) #define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) #define XFRM_MAX_QUEUE_LEN 100
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) struct xfrm_flo {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) struct dst_entry *dst_orig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) u8 flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) /* prefixes smaller than this are stored in lists, not trees. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) #define INEXACT_PREFIXLEN_IPV4 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) #define INEXACT_PREFIXLEN_IPV6 48
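/* e.g. an IPv4 selector with a /8 prefix stays on a list, while a /24
 * prefix goes into the rb-tree; the same idea applies to IPv6 with the
 * /48 cutoff above.
 */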
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) struct xfrm_pol_inexact_node {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) struct rb_node node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) union {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) xfrm_address_t addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) struct rcu_head rcu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) u8 prefixlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) struct rb_root root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) /* the policies matching this node; this list may be empty */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) struct hlist_head hhead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) /* xfrm inexact policy search tree:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) * xfrm_pol_inexact_bin = hash(dir,type,family,if_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) * |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) * +---- root_d: sorted by daddr:prefix
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) * | |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) * | xfrm_pol_inexact_node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) * | |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) * | +- root: sorted by saddr/prefix
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) * | | |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) * | | xfrm_pol_inexact_node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) * | | |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) * | | + root: unused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) * | | |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) * | | + hhead: saddr:daddr policies
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) * | |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) * | +- coarse policies and all any:daddr policies
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) * |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) * +---- root_s: sorted by saddr:prefix
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) * | |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) * | xfrm_pol_inexact_node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) * | |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) * | + root: unused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) * | |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) * | + hhead: saddr:any policies
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) * |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) * +---- coarse policies and all any:any policies
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) * Lookups return four candidate lists:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) * 1. any:any list from top-level xfrm_pol_inexact_bin
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) * 2. any:daddr list from daddr tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) * 3. saddr:daddr list from 2nd level daddr tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) * 4. saddr:any list from saddr tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) * This result set then needs to be searched for the policy with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) * the lowest priority. If two results have the same priority, the youngest one wins.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) struct xfrm_pol_inexact_key {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) possible_net_t net;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) u32 if_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) u16 family;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) u8 dir, type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) struct xfrm_pol_inexact_bin {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) struct xfrm_pol_inexact_key k;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) struct rhash_head head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) /* list containing '*:*' policies */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) struct hlist_head hhead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) seqcount_spinlock_t count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) /* tree sorted by daddr/prefix */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) struct rb_root root_d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) /* tree sorted by saddr/prefix */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) struct rb_root root_s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) /* slow path below */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) struct list_head inexact_bins;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) struct rcu_head rcu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) enum xfrm_pol_inexact_candidate_type {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) XFRM_POL_CAND_BOTH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) XFRM_POL_CAND_SADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) XFRM_POL_CAND_DADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) XFRM_POL_CAND_ANY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) XFRM_POL_CAND_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) struct xfrm_pol_inexact_candidates {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) struct hlist_head *res[XFRM_POL_CAND_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) };
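
/* A minimal sketch (kept under #if 0, illustration only) of the resolution
 * rule described above: scan the four candidate lists and keep the policy
 * with the numerically lowest priority.  The real lookup code later in this
 * file additionally checks the selector and breaks priority ties in favour
 * of the youngest policy; both steps are omitted here for brevity.
 */
#if 0
static struct xfrm_policy *
example_pick_lowest_priority(const struct xfrm_pol_inexact_candidates *cand)
{
	struct xfrm_policy *best = NULL;
	struct xfrm_policy *pol;
	int i;

	for (i = 0; i < XFRM_POL_CAND_MAX; i++) {
		if (!cand->res[i])
			continue;
		hlist_for_each_entry(pol, cand->res[i], bydst) {
			/* lower numeric priority value wins */
			if (!best || pol->priority < best->priority)
				best = pol;
		}
	}
	return best;
}
#endif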
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) static DEFINE_SPINLOCK(xfrm_if_cb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) static struct xfrm_if_cb const __rcu *xfrm_if_cb __read_mostly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) __read_mostly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) static struct kmem_cache *xfrm_dst_cache __ro_after_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) static __read_mostly seqcount_mutex_t xfrm_policy_hash_generation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) static struct rhashtable xfrm_policy_inexact_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) static const struct rhashtable_params xfrm_pol_inexact_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) static int stale_bundle(struct dst_entry *dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) static int xfrm_bundle_ok(struct xfrm_dst *xdst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) static void xfrm_policy_queue_process(struct timer_list *t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) int dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) static struct xfrm_pol_inexact_bin *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family, u8 dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) u32 if_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) static struct xfrm_pol_inexact_bin *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) xfrm_policy_inexact_lookup_rcu(struct net *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) u8 type, u16 family, u8 dir, u32 if_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) static struct xfrm_policy *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) xfrm_policy_insert_list(struct hlist_head *chain, struct xfrm_policy *policy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) bool excl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) struct xfrm_policy *policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) struct xfrm_pol_inexact_bin *b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) const xfrm_address_t *saddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) const xfrm_address_t *daddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) static inline bool xfrm_pol_hold_rcu(struct xfrm_policy *policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) return refcount_inc_not_zero(&policy->refcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) static inline bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) __xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) const struct flowi4 *fl4 = &fl->u.ip4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) return addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) !((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) !((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) (fl4->flowi4_proto == sel->proto || !sel->proto) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) (fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) static inline bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) __xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) const struct flowi6 *fl6 = &fl->u.ip6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) return addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) !((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) !((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) (fl6->flowi6_proto == sel->proto || !sel->proto) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) (fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) unsigned short family)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) switch (family) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) case AF_INET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) return __xfrm4_selector_match(sel, fl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) case AF_INET6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) return __xfrm6_selector_match(sel, fl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) }
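
/* Usage sketch (kept under #if 0, illustration only): match a UDP flow to
 * port 500 against a 10.0.0.0/8 -> 192.0.2.0/24 selector.  Addresses and
 * ports are arbitrary example values.
 */
#if 0
static bool example_selector_match(void)
{
	struct xfrm_selector sel = {
		.daddr.a4	= htonl(0xc0000200),	/* 192.0.2.0/24 */
		.saddr.a4	= htonl(0x0a000000),	/* 10.0.0.0/8 */
		.prefixlen_d	= 24,
		.prefixlen_s	= 8,
		.proto		= IPPROTO_UDP,
		.dport		= htons(500),
		.dport_mask	= htons(0xffff),	/* exact dport match */
	};
	struct flowi fl = {
		.u.ip4 = {
			.daddr		 = htonl(0xc0000201),	/* 192.0.2.1 */
			.saddr		 = htonl(0x0a000001),	/* 10.0.0.1 */
			.flowi4_proto	 = IPPROTO_UDP,
			.uli.ports.dport = htons(500),
		},
	};

	return xfrm_selector_match(&sel, &fl, AF_INET);	/* true */
}
#endif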
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239)
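/* Returns the afinfo with rcu_read_lock() held on success; the caller is
 * responsible for rcu_read_unlock() once it is done with the result (see
 * __xfrm_dst_lookup() below).  A NULL return means the lock is not held.
 */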
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) const struct xfrm_policy_afinfo *afinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) if (unlikely(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) if (unlikely(!afinfo))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) return afinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) /* Called with rcu_read_lock(). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) static const struct xfrm_if_cb *xfrm_if_get_cb(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) return rcu_dereference(xfrm_if_cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) const xfrm_address_t *saddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) const xfrm_address_t *daddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) int family, u32 mark)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) const struct xfrm_policy_afinfo *afinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) struct dst_entry *dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) afinfo = xfrm_policy_get_afinfo(family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) if (unlikely(afinfo == NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) return ERR_PTR(-EAFNOSUPPORT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr, mark);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) return dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) EXPORT_SYMBOL(__xfrm_dst_lookup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) int tos, int oif,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) xfrm_address_t *prev_saddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) xfrm_address_t *prev_daddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) int family, u32 mark)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) struct net *net = xs_net(x);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) xfrm_address_t *saddr = &x->props.saddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) xfrm_address_t *daddr = &x->id.daddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) struct dst_entry *dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) saddr = x->coaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) daddr = prev_daddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) saddr = prev_saddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) daddr = x->coaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family, mark);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) if (!IS_ERR(dst)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) if (prev_saddr != saddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) if (prev_daddr != daddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) return dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) static inline unsigned long make_jiffies(long secs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) return MAX_SCHEDULE_TIMEOUT-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) return secs*HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) static void xfrm_policy_timer(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) struct xfrm_policy *xp = from_timer(xp, t, timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) time64_t now = ktime_get_real_seconds();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) time64_t next = TIME64_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) int warn = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) int dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) read_lock(&xp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) if (unlikely(xp->walk.dead))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) dir = xfrm_policy_id2dir(xp->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) if (xp->lft.hard_add_expires_seconds) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) time64_t tmo = xp->lft.hard_add_expires_seconds +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) xp->curlft.add_time - now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) if (tmo <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) goto expired;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) if (tmo < next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) next = tmo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) if (xp->lft.hard_use_expires_seconds) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) time64_t tmo = xp->lft.hard_use_expires_seconds +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) (xp->curlft.use_time ? : xp->curlft.add_time) - now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) if (tmo <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) goto expired;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) if (tmo < next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) next = tmo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) if (xp->lft.soft_add_expires_seconds) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) time64_t tmo = xp->lft.soft_add_expires_seconds +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) xp->curlft.add_time - now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) if (tmo <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) warn = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) tmo = XFRM_KM_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) if (tmo < next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) next = tmo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) if (xp->lft.soft_use_expires_seconds) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) time64_t tmo = xp->lft.soft_use_expires_seconds +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) (xp->curlft.use_time ? : xp->curlft.add_time) - now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) if (tmo <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) warn = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) tmo = XFRM_KM_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) if (tmo < next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) next = tmo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) if (warn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) km_policy_expired(xp, dir, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) if (next != TIME64_MAX &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) xfrm_pol_hold(xp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) read_unlock(&xp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) xfrm_pol_put(xp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) expired:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) read_unlock(&xp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) if (!xfrm_policy_delete(xp, dir))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) km_policy_expired(xp, dir, 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) xfrm_pol_put(xp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) /* Allocate xfrm_policy. Not used here; it is supposed to be used by pfkeyv2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) * SPD calls.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) struct xfrm_policy *policy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) policy = kzalloc(sizeof(struct xfrm_policy), gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) if (policy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) write_pnet(&policy->xp_net, net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) INIT_LIST_HEAD(&policy->walk.all);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) INIT_HLIST_NODE(&policy->bydst_inexact_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) INIT_HLIST_NODE(&policy->bydst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) INIT_HLIST_NODE(&policy->byidx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) rwlock_init(&policy->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) refcount_set(&policy->refcnt, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) skb_queue_head_init(&policy->polq.hold_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) timer_setup(&policy->timer, xfrm_policy_timer, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) timer_setup(&policy->polq.hold_timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) xfrm_policy_queue_process, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) return policy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) EXPORT_SYMBOL(xfrm_policy_alloc);
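
/* Usage sketch (kept under #if 0, illustration only), loosely following the
 * pfkeyv2 pattern mentioned above: allocate a policy, and on an error path
 * mark it dead before handing it to xfrm_policy_destroy(), which requires
 * walk.dead to be set.
 */
#if 0
static struct xfrm_policy *example_policy_setup(struct net *net)
{
	struct xfrm_policy *xp;

	xp = xfrm_policy_alloc(net, GFP_KERNEL);
	if (!xp)
		return NULL;

	/* ... fill in xp->selector, xp->lft, xp->family, etc. ... */

	if (0 /* some validation failed */) {
		xp->walk.dead = 1;
		xfrm_policy_destroy(xp);
		return NULL;
	}
	return xp;
}
#endif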
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) static void xfrm_policy_destroy_rcu(struct rcu_head *head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) security_xfrm_policy_free(policy->security);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) kfree(policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) /* Destroy xfrm_policy: descendant resources must have been released by this point. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) void xfrm_policy_destroy(struct xfrm_policy *policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) BUG_ON(!policy->walk.dead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) EXPORT_SYMBOL(xfrm_policy_destroy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) /* Rule must be locked. Release descendant resources and announce the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) * entry dead. The rule must already be unlinked from all lists at this point.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) static void xfrm_policy_kill(struct xfrm_policy *policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) write_lock_bh(&policy->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) policy->walk.dead = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) write_unlock_bh(&policy->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) atomic_inc(&policy->genid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) if (del_timer(&policy->polq.hold_timer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) xfrm_pol_put(policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) skb_queue_purge(&policy->polq.hold_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) if (del_timer(&policy->timer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) xfrm_pol_put(policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) xfrm_pol_put(policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) static inline unsigned int idx_hash(struct net *net, u32 index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) return __idx_hash(index, net->xfrm.policy_idx_hmask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) /* calculate policy hash thresholds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) static void __get_hash_thresh(struct net *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) unsigned short family, int dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) u8 *dbits, u8 *sbits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) switch (family) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) case AF_INET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) *dbits = net->xfrm.policy_bydst[dir].dbits4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) *sbits = net->xfrm.policy_bydst[dir].sbits4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) case AF_INET6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) *dbits = net->xfrm.policy_bydst[dir].dbits6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) *sbits = net->xfrm.policy_bydst[dir].sbits6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) *dbits = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) *sbits = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) static struct hlist_head *policy_hash_bysel(struct net *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) const struct xfrm_selector *sel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) unsigned short family, int dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) unsigned int hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) u8 dbits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) u8 sbits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) __get_hash_thresh(net, family, dir, &dbits, &sbits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) hash = __sel_hash(sel, family, hmask, dbits, sbits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499)
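	/* __sel_hash() returns hmask + 1 when the selector prefixes are
	 * shorter than the configured hash thresholds; such a policy cannot
	 * live in the exact bydst hash and is kept on the inexact
	 * lists/trees instead.
	 */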
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) if (hash == hmask + 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) static struct hlist_head *policy_hash_direct(struct net *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) const xfrm_address_t *daddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) const xfrm_address_t *saddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) unsigned short family, int dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) unsigned int hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) u8 dbits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) u8 sbits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) __get_hash_thresh(net, family, dir, &dbits, &sbits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) static void xfrm_dst_hash_transfer(struct net *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) struct hlist_head *list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) struct hlist_head *ndsttable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) unsigned int nhashmask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) int dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) struct hlist_node *tmp, *entry0 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) struct xfrm_policy *pol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) unsigned int h0 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) u8 dbits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) u8 sbits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) redo:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) hlist_for_each_entry_safe(pol, tmp, list, bydst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) unsigned int h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) __get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) pol->family, nhashmask, dbits, sbits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) if (!entry0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) hlist_del_rcu(&pol->bydst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) hlist_add_head_rcu(&pol->bydst, ndsttable + h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) h0 = h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) if (h != h0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) hlist_del_rcu(&pol->bydst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) hlist_add_behind_rcu(&pol->bydst, entry0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) entry0 = &pol->bydst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) if (!hlist_empty(list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) entry0 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) goto redo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) static void xfrm_idx_hash_transfer(struct hlist_head *list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) struct hlist_head *nidxtable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) unsigned int nhashmask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) struct hlist_node *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) struct xfrm_policy *pol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) hlist_for_each_entry_safe(pol, tmp, list, byidx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) unsigned int h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) h = __idx_hash(pol->index, nhashmask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) hlist_add_head(&pol->byidx, nidxtable+h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575)
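/* Double the table on resize: e.g. an old hmask of 0xf (16 buckets)
 * becomes 0x1f (32 buckets).
 */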
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) return ((old_hmask + 1) << 1) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) static void xfrm_bydst_resize(struct net *net, int dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) unsigned int nhashmask = xfrm_new_hash_mask(hmask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) struct hlist_head *ndst = xfrm_hash_alloc(nsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) struct hlist_head *odst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) if (!ndst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) spin_lock_bh(&net->xfrm.xfrm_policy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) write_seqcount_begin(&xfrm_policy_hash_generation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) lockdep_is_held(&net->xfrm.xfrm_policy_lock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) for (i = hmask; i >= 0; i--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) net->xfrm.policy_bydst[dir].hmask = nhashmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) write_seqcount_end(&xfrm_policy_hash_generation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) synchronize_rcu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) static void xfrm_byidx_resize(struct net *net, int total)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) unsigned int hmask = net->xfrm.policy_idx_hmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) unsigned int nhashmask = xfrm_new_hash_mask(hmask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) struct hlist_head *oidx = net->xfrm.policy_byidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) struct hlist_head *nidx = xfrm_hash_alloc(nsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) if (!nidx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) spin_lock_bh(&net->xfrm.xfrm_policy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) for (i = hmask; i >= 0; i--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) net->xfrm.policy_byidx = nidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) net->xfrm.policy_idx_hmask = nhashmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) unsigned int cnt = net->xfrm.policy_count[dir];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) if (total)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) *total += cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) if ((hmask + 1) < xfrm_policy_hashmax &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) cnt > hmask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) static inline int xfrm_byidx_should_resize(struct net *net, int total)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) unsigned int hmask = net->xfrm.policy_idx_hmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) if ((hmask + 1) < xfrm_policy_hashmax &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) total > hmask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) si->spdhcnt = net->xfrm.policy_idx_hmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) si->spdhmcnt = xfrm_policy_hashmax;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) EXPORT_SYMBOL(xfrm_spd_getinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) static DEFINE_MUTEX(hash_resize_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) static void xfrm_hash_resize(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) int dir, total;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) mutex_lock(&hash_resize_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) total = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) if (xfrm_bydst_should_resize(net, dir, &total))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) xfrm_bydst_resize(net, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) if (xfrm_byidx_should_resize(net, total))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) xfrm_byidx_resize(net, total);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) mutex_unlock(&hash_resize_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) /* Make sure *pol can be inserted into fastbin.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) * Useful to check that later insert requests will be successful
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) * (provided xfrm_policy_lock is held throughout).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) static struct xfrm_pol_inexact_bin *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) xfrm_policy_inexact_alloc_bin(const struct xfrm_policy *pol, u8 dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) struct xfrm_pol_inexact_bin *bin, *prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) struct xfrm_pol_inexact_key k = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) .family = pol->family,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) .type = pol->type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) .dir = dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) .if_id = pol->if_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) struct net *net = xp_net(pol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) write_pnet(&k.net, net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) bin = rhashtable_lookup_fast(&xfrm_policy_inexact_table, &k,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) xfrm_pol_inexact_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) if (bin)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) return bin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) bin = kzalloc(sizeof(*bin), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) if (!bin)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) bin->k = k;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) INIT_HLIST_HEAD(&bin->hhead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) bin->root_d = RB_ROOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) bin->root_s = RB_ROOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) seqcount_spinlock_init(&bin->count, &net->xfrm.xfrm_policy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) prev = rhashtable_lookup_get_insert_key(&xfrm_policy_inexact_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) &bin->k, &bin->head,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) xfrm_pol_inexact_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) if (!prev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) list_add(&bin->inexact_bins, &net->xfrm.inexact_bins);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) return bin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) kfree(bin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) return IS_ERR(prev) ? NULL : prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) static bool xfrm_pol_inexact_addr_use_any_list(const xfrm_address_t *addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) int family, u8 prefixlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) if (xfrm_addr_any(addr, family))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) if (family == AF_INET6 && prefixlen < INEXACT_PREFIXLEN_IPV6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) if (family == AF_INET && prefixlen < INEXACT_PREFIXLEN_IPV4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) xfrm_policy_inexact_insert_use_any_list(const struct xfrm_policy *policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) const xfrm_address_t *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) bool saddr_any, daddr_any;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) u8 prefixlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) addr = &policy->selector.saddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) prefixlen = policy->selector.prefixlen_s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) saddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) policy->family,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) prefixlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) addr = &policy->selector.daddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) prefixlen = policy->selector.prefixlen_d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) daddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) policy->family,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) prefixlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) return saddr_any && daddr_any;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) static void xfrm_pol_inexact_node_init(struct xfrm_pol_inexact_node *node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) const xfrm_address_t *addr, u8 prefixlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) node->addr = *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) node->prefixlen = prefixlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) static struct xfrm_pol_inexact_node *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) xfrm_pol_inexact_node_alloc(const xfrm_address_t *addr, u8 prefixlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) struct xfrm_pol_inexact_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) node = kzalloc(sizeof(*node), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) if (node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) xfrm_pol_inexact_node_init(node, addr, prefixlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) return node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797)
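/* Compare two addresses under the first 'prefixlen' bits only, returning
 * < 0, 0 or > 0 in the spirit of memcmp(). e.g. for AF_INET, 10.1.2.3 and
 * 10.1.9.9 are equal under a /16 prefix but differ under /24.
 */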
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) static int xfrm_policy_addr_delta(const xfrm_address_t *a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) const xfrm_address_t *b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) u8 prefixlen, u16 family)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) u32 ma, mb, mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) unsigned int pdw, pbi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) int delta = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) switch (family) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) case AF_INET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) if (prefixlen == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) mask = ~0U << (32 - prefixlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) ma = ntohl(a->a4) & mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) mb = ntohl(b->a4) & mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) if (ma < mb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) delta = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) else if (ma > mb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) delta = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) case AF_INET6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) pdw = prefixlen >> 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) pbi = prefixlen & 0x1f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) if (pdw) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) delta = memcmp(a->a6, b->a6, pdw << 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) if (delta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) return delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) if (pbi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) mask = ~0U << (32 - pbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) ma = ntohl(a->a6[pdw]) & mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) mb = ntohl(b->a6[pdw]) & mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) if (ma < mb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) delta = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) else if (ma > mb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) delta = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) return delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
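/* Editor's note (added): re-add every policy flagged with ->bydst_reinsert
 * to the hash list of node @n, walking the global policy list in creation
 * order so entries end up sorted by priority and original position.  The
 * trailing checks verify that each reinserted policy matches the node's
 * prefix on saddr or daddr, and that a single node never mixes saddr-only
 * and daddr-only matches.
 */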
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) static void xfrm_policy_inexact_list_reinsert(struct net *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) struct xfrm_pol_inexact_node *n,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) u16 family)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) unsigned int matched_s, matched_d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) struct xfrm_policy *policy, *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) matched_s = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) matched_d = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) struct hlist_node *newpos = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) bool matches_s, matches_d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) if (!policy->bydst_reinsert)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) WARN_ON_ONCE(policy->family != family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) policy->bydst_reinsert = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) hlist_for_each_entry(p, &n->hhead, bydst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) if (policy->priority > p->priority)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) newpos = &p->bydst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) else if (policy->priority == p->priority &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) policy->pos > p->pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) newpos = &p->bydst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) if (newpos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) hlist_add_behind_rcu(&policy->bydst, newpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) hlist_add_head_rcu(&policy->bydst, &n->hhead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) /* paranoia checks follow.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) * Check that the reinserted policy matches at least
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) * saddr or daddr for the current node prefix.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) * Matching both is fine; matching saddr in one policy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) * (but not daddr) and then matching only daddr in another
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) * is a bug.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) matches_s = xfrm_policy_addr_delta(&policy->selector.saddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) &n->addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) n->prefixlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) family) == 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) matches_d = xfrm_policy_addr_delta(&policy->selector.daddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) &n->addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) n->prefixlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) family) == 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) if (matches_s && matches_d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) WARN_ON_ONCE(!matches_s && !matches_d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) if (matches_s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) matched_s++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) if (matches_d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) matched_d++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) WARN_ON_ONCE(matched_s && matched_d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
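/* Editor's note (added): insert node @n into the rb-tree @new.  If the tree
 * already contains a node whose prefix overlaps @n's (compared under the
 * shorter of the two prefix lengths), the two are merged: @n's policies are
 * moved over via the reinsert flag, the surviving node keeps the shorter
 * prefixlen, @n is freed and, when the prefix length changed, the merged
 * node is re-inserted from scratch because its tree key changed.
 */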
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) static void xfrm_policy_inexact_node_reinsert(struct net *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) struct xfrm_pol_inexact_node *n,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) struct rb_root *new,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) u16 family)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) struct xfrm_pol_inexact_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) struct rb_node **p, *parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) /* we should not have another subtree here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) WARN_ON_ONCE(!RB_EMPTY_ROOT(&n->root));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) restart:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) parent = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) p = &new->rb_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) while (*p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) u8 prefixlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) int delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) parent = *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) node = rb_entry(*p, struct xfrm_pol_inexact_node, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) prefixlen = min(node->prefixlen, n->prefixlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) delta = xfrm_policy_addr_delta(&n->addr, &node->addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) prefixlen, family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) if (delta < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) p = &parent->rb_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) } else if (delta > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) p = &parent->rb_right;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) bool same_prefixlen = node->prefixlen == n->prefixlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) struct xfrm_policy *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) hlist_for_each_entry(tmp, &n->hhead, bydst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) tmp->bydst_reinsert = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) hlist_del_rcu(&tmp->bydst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) node->prefixlen = prefixlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) xfrm_policy_inexact_list_reinsert(net, node, family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) if (same_prefixlen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) kfree_rcu(n, rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) rb_erase(*p, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) kfree_rcu(n, rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) n = node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) goto restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) rb_link_node_rcu(&n->node, parent, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) rb_insert_color(&n->node, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) /* merge nodes v and n */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) static void xfrm_policy_inexact_node_merge(struct net *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) struct xfrm_pol_inexact_node *v,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) struct xfrm_pol_inexact_node *n,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) u16 family)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) struct xfrm_pol_inexact_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) struct xfrm_policy *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) struct rb_node *rnode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) /* To-be-merged node v has a subtree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) * Dismantle it and insert its nodes to n->root.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) while ((rnode = rb_first(&v->root)) != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) node = rb_entry(rnode, struct xfrm_pol_inexact_node, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) rb_erase(&node->node, &v->root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) xfrm_policy_inexact_node_reinsert(net, node, &n->root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) hlist_for_each_entry(tmp, &v->hhead, bydst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) tmp->bydst_reinsert = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) hlist_del_rcu(&tmp->bydst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) xfrm_policy_inexact_list_reinsert(net, n, family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
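/* Editor's note (added): find or create the tree node for @addr/@prefixlen
 * in @root.  If an existing node's prefix already covers @addr and is no
 * longer than @prefixlen, that node is returned as-is.  Existing nodes that
 * fall inside the new, shorter prefix are removed from the tree and merged
 * into a single node keyed by @addr/@prefixlen, which is then linked back
 * into @root.  Returns NULL if allocation fails.
 */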
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) static struct xfrm_pol_inexact_node *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) xfrm_policy_inexact_insert_node(struct net *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) struct rb_root *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) xfrm_address_t *addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) u16 family, u8 prefixlen, u8 dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) struct xfrm_pol_inexact_node *cached = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) struct rb_node **p, *parent = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) struct xfrm_pol_inexact_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) p = &root->rb_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) while (*p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) int delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) parent = *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) node = rb_entry(*p, struct xfrm_pol_inexact_node, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) delta = xfrm_policy_addr_delta(addr, &node->addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) node->prefixlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) if (delta == 0 && prefixlen >= node->prefixlen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) WARN_ON_ONCE(cached); /* ipsec policies got lost */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) return node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) if (delta < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) p = &parent->rb_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) p = &parent->rb_right;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) if (prefixlen < node->prefixlen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) delta = xfrm_policy_addr_delta(addr, &node->addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) prefixlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) if (delta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) /* This node is a subnet of the new prefix. Remove it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) * and re-insert it keyed by the smaller (less specific)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) * prefixlen, merging in all other nodes that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) * reduced prefixlen now also covers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) rb_erase(&node->node, root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) if (!cached) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) xfrm_pol_inexact_node_init(node, addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) prefixlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) cached = node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) /* This node also falls within the new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) * prefixlen. Merge the to-be-reinserted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) * node and this one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) xfrm_policy_inexact_node_merge(net, node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) cached, family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) kfree_rcu(node, rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) /* restart */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) p = &root->rb_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) parent = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) node = cached;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) if (!node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) node = xfrm_pol_inexact_node_alloc(addr, prefixlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) if (!node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) rb_link_node_rcu(&node->node, parent, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) rb_insert_color(&node->node, root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) return node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
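/* Editor's note (added): recursively remove tree nodes that hold no policies
 * and no subtree.  With @rm set (the netns-exit path) every node is expected
 * to be empty; a non-empty node then triggers a one-time warning and is kept.
 */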
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) static void xfrm_policy_inexact_gc_tree(struct rb_root *r, bool rm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) struct xfrm_pol_inexact_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) struct rb_node *rn = rb_first(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) while (rn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) node = rb_entry(rn, struct xfrm_pol_inexact_node, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) xfrm_policy_inexact_gc_tree(&node->root, rm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) rn = rb_next(rn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) if (!hlist_empty(&node->hhead) || !RB_EMPTY_ROOT(&node->root)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) WARN_ON_ONCE(rm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) rb_erase(&node->node, r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) kfree_rcu(node, rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
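/* Editor's note (added): garbage-collect both subtrees of bin @b and, if the
 * bin ends up with no trees and no policies, unlink it from the rhashtable
 * and free it.  @net_exit marks netns teardown, where leftovers indicate a bug.
 */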
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) static void __xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b, bool net_exit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) write_seqcount_begin(&b->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) xfrm_policy_inexact_gc_tree(&b->root_d, net_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) xfrm_policy_inexact_gc_tree(&b->root_s, net_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) write_seqcount_end(&b->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) if (!RB_EMPTY_ROOT(&b->root_d) || !RB_EMPTY_ROOT(&b->root_s) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) !hlist_empty(&b->hhead)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) WARN_ON_ONCE(net_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) if (rhashtable_remove_fast(&xfrm_policy_inexact_table, &b->head,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) xfrm_pol_inexact_params) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) list_del(&b->inexact_bins);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) kfree_rcu(b, rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) static void xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) struct net *net = read_pnet(&b->k.net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) spin_lock_bh(&net->xfrm.xfrm_policy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) __xfrm_policy_inexact_prune_bin(b, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) static void __xfrm_policy_inexact_flush(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) struct xfrm_pol_inexact_bin *bin, *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) list_for_each_entry_safe(bin, t, &net->xfrm.inexact_bins, inexact_bins)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) __xfrm_policy_inexact_prune_bin(bin, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
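/* Editor's note (added): pick (and create on demand) the hash list within
 * @bin that @policy belongs to:
 *  - both selector addresses wildcarded: the bin's own list,
 *  - only daddr wildcarded: a node in root_s keyed by saddr,
 *  - daddr fixed: a node in root_d keyed by daddr, with an extra per-saddr
 *    node in that node's subtree when saddr is fixed as well.
 * Returns NULL if a needed node cannot be allocated.
 */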
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) static struct hlist_head *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) xfrm_policy_inexact_alloc_chain(struct xfrm_pol_inexact_bin *bin,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) struct xfrm_policy *policy, u8 dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) struct xfrm_pol_inexact_node *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) struct net *net;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) net = xp_net(policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) if (xfrm_policy_inexact_insert_use_any_list(policy))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) return &bin->hhead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.daddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) policy->family,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) policy->selector.prefixlen_d)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) write_seqcount_begin(&bin->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) n = xfrm_policy_inexact_insert_node(net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) &bin->root_s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) &policy->selector.saddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) policy->family,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) policy->selector.prefixlen_s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) write_seqcount_end(&bin->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) if (!n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) return &n->hhead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) /* daddr is fixed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) write_seqcount_begin(&bin->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) n = xfrm_policy_inexact_insert_node(net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) &bin->root_d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) &policy->selector.daddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) policy->family,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) policy->selector.prefixlen_d, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) write_seqcount_end(&bin->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) if (!n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) /* saddr is wildcard */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.saddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) policy->family,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) policy->selector.prefixlen_s))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) return &n->hhead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) write_seqcount_begin(&bin->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) n = xfrm_policy_inexact_insert_node(net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) &n->root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) &policy->selector.saddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) policy->family,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) policy->selector.prefixlen_s, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) write_seqcount_end(&bin->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) if (!n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) return &n->hhead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
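/* Editor's note (added): insert @policy into the inexact store: allocate or
 * look up its bin, pick the right chain, insert ordered by priority and link
 * the policy into the per-direction inexact list.  Returns the policy being
 * replaced (if any), NULL, or an ERR_PTR on allocation failure or when @excl
 * finds a duplicate.
 */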
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) static struct xfrm_policy *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) xfrm_policy_inexact_insert(struct xfrm_policy *policy, u8 dir, int excl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) struct xfrm_pol_inexact_bin *bin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) struct xfrm_policy *delpol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) struct hlist_head *chain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) struct net *net;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) bin = xfrm_policy_inexact_alloc_bin(policy, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) if (!bin)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) net = xp_net(policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) chain = xfrm_policy_inexact_alloc_chain(bin, policy, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) if (!chain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) __xfrm_policy_inexact_prune_bin(bin, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) delpol = xfrm_policy_insert_list(chain, policy, excl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) if (delpol && excl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) __xfrm_policy_inexact_prune_bin(bin, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) return ERR_PTR(-EEXIST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) chain = &net->xfrm.policy_inexact[dir];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) xfrm_policy_insert_inexact_list(chain, policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) if (delpol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) __xfrm_policy_inexact_prune_bin(bin, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) return delpol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
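/* Editor's note (added): worker that rebuilds the bydst hash tables after the
 * selector prefixlen thresholds changed: pre-allocate the inexact bins and
 * nodes the new thresholds will need, unlink every policy from the old
 * tables, update the per-direction dbits/sbits and finally re-insert all
 * policies in creation order.
 */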
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) static void xfrm_hash_rebuild(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) struct net *net = container_of(work, struct net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) xfrm.policy_hthresh.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) unsigned int hmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) struct xfrm_policy *pol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) struct xfrm_policy *policy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) struct hlist_head *chain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) struct hlist_head *odst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) struct hlist_node *newpos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) int dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) unsigned int seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) u8 lbits4, rbits4, lbits6, rbits6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) mutex_lock(&hash_resize_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) /* read selector prefixlen thresholds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) seq = read_seqbegin(&net->xfrm.policy_hthresh.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) lbits4 = net->xfrm.policy_hthresh.lbits4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) rbits4 = net->xfrm.policy_hthresh.rbits4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) lbits6 = net->xfrm.policy_hthresh.lbits6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) rbits6 = net->xfrm.policy_hthresh.rbits6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) } while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) spin_lock_bh(&net->xfrm.xfrm_policy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) write_seqcount_begin(&xfrm_policy_hash_generation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) /* make sure that we can insert the inexact policies again before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) * we start with the destructive action.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) list_for_each_entry(policy, &net->xfrm.policy_all, walk.all) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) struct xfrm_pol_inexact_bin *bin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) u8 dbits, sbits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) dir = xfrm_policy_id2dir(policy->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) if (policy->walk.dead || dir >= XFRM_POLICY_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) if (policy->family == AF_INET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) dbits = rbits4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) sbits = lbits4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) dbits = rbits6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) sbits = lbits6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) if (policy->family == AF_INET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) dbits = lbits4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) sbits = rbits4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) dbits = lbits6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) sbits = rbits6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) if (policy->selector.prefixlen_d < dbits ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) policy->selector.prefixlen_s < sbits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) bin = xfrm_policy_inexact_alloc_bin(policy, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) if (!bin)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) if (!xfrm_policy_inexact_alloc_chain(bin, policy, dir))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) /* reset the bydst and inexact table in all directions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) struct hlist_node *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) hlist_for_each_entry_safe(policy, n,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) &net->xfrm.policy_inexact[dir],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) bydst_inexact_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) hlist_del_rcu(&policy->bydst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) hlist_del_init(&policy->bydst_inexact_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) hmask = net->xfrm.policy_bydst[dir].hmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) odst = net->xfrm.policy_bydst[dir].table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) for (i = hmask; i >= 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) hlist_for_each_entry_safe(policy, n, odst + i, bydst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) hlist_del_rcu(&policy->bydst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) /* dir out => dst = remote, src = local */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) net->xfrm.policy_bydst[dir].dbits4 = rbits4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) net->xfrm.policy_bydst[dir].sbits4 = lbits4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) net->xfrm.policy_bydst[dir].dbits6 = rbits6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) net->xfrm.policy_bydst[dir].sbits6 = lbits6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) /* dir in/fwd => dst = local, src = remote */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) net->xfrm.policy_bydst[dir].dbits4 = lbits4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) net->xfrm.policy_bydst[dir].sbits4 = rbits4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) net->xfrm.policy_bydst[dir].dbits6 = lbits6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) net->xfrm.policy_bydst[dir].sbits6 = rbits6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) /* re-insert all policies by order of creation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) if (policy->walk.dead)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) dir = xfrm_policy_id2dir(policy->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) if (dir >= XFRM_POLICY_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) /* skip socket policies */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) newpos = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) chain = policy_hash_bysel(net, &policy->selector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) policy->family, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) if (!chain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) void *p = xfrm_policy_inexact_insert(policy, dir, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) WARN_ONCE(IS_ERR(p), "reinsert: %ld\n", PTR_ERR(p));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) hlist_for_each_entry(pol, chain, bydst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) if (policy->priority >= pol->priority)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) newpos = &pol->bydst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) if (newpos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) hlist_add_behind_rcu(&policy->bydst, newpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) hlist_add_head_rcu(&policy->bydst, chain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) __xfrm_policy_inexact_flush(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) write_seqcount_end(&xfrm_policy_hash_generation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) mutex_unlock(&hash_resize_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) void xfrm_policy_hash_rebuild(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) schedule_work(&net->xfrm.policy_hthresh.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) EXPORT_SYMBOL(xfrm_policy_hash_rebuild);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) /* Generate a new policy index. KAME seems to generate them in order, at the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) * cost of completely unpredictable rule ordering. That will not do here. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) static u32 idx_generator;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) struct hlist_head *list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) struct xfrm_policy *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) u32 idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) int found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) if (!index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) idx = (idx_generator | dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) idx_generator += 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) idx = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) if (idx == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) idx = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) list = net->xfrm.policy_byidx + idx_hash(net, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) found = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) hlist_for_each_entry(p, list, byidx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) if (p->index == idx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) found = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) if (!found)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) return idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) u32 *p1 = (u32 *) s1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) u32 *p2 = (u32 *) s2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) int len = sizeof(struct xfrm_selector) / sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) for (i = 0; i < len; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) if (p1[i] != p2[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)
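/* Editor's note (added): transfer packets waiting on @old's hold queue to
 * @new and arm @new's hold timer to run immediately, so the queued packets
 * are processed again under the replacement policy.
 */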
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) static void xfrm_policy_requeue(struct xfrm_policy *old,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) struct xfrm_policy *new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) struct xfrm_policy_queue *pq = &old->polq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) struct sk_buff_head list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) if (skb_queue_empty(&pq->hold_queue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) __skb_queue_head_init(&list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) spin_lock_bh(&pq->hold_queue.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) skb_queue_splice_init(&pq->hold_queue, &list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) if (del_timer(&pq->hold_timer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) xfrm_pol_put(old);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) spin_unlock_bh(&pq->hold_queue.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) pq = &new->polq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) spin_lock_bh(&pq->hold_queue.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) skb_queue_splice(&list, &pq->hold_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) pq->timeout = XFRM_QUEUE_TMO_MIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) if (!mod_timer(&pq->hold_timer, jiffies))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) xfrm_pol_hold(new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) spin_unlock_bh(&pq->hold_queue.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) static inline bool xfrm_policy_mark_match(const struct xfrm_mark *mark,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) struct xfrm_policy *pol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) return mark->v == pol->mark.v && mark->m == pol->mark.m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) static u32 xfrm_pol_bin_key(const void *data, u32 len, u32 seed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) const struct xfrm_pol_inexact_key *k = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) u32 a = k->type << 24 | k->dir << 16 | k->family;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) return jhash_3words(a, k->if_id, net_hash_mix(read_pnet(&k->net)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) seed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) static u32 xfrm_pol_bin_obj(const void *data, u32 len, u32 seed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) const struct xfrm_pol_inexact_bin *b = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) return xfrm_pol_bin_key(&b->k, 0, seed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) static int xfrm_pol_bin_cmp(struct rhashtable_compare_arg *arg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) const void *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) const struct xfrm_pol_inexact_key *key = arg->key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) const struct xfrm_pol_inexact_bin *b = ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) if (!net_eq(read_pnet(&b->k.net), read_pnet(&key->net)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) ret = b->k.dir ^ key->dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) ret = b->k.type ^ key->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) ret = b->k.family ^ key->family;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) return b->k.if_id ^ key->if_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) static const struct rhashtable_params xfrm_pol_inexact_params = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) .head_offset = offsetof(struct xfrm_pol_inexact_bin, head),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) .hashfn = xfrm_pol_bin_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) .obj_hashfn = xfrm_pol_bin_obj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) .obj_cmpfn = xfrm_pol_bin_cmp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) .automatic_shrinking = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
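/* Editor's note (added): link @policy into the per-direction
 * bydst_inexact_list, ordered by priority.  An already present policy with
 * the same selector, mark and security context ends the scan (the caller
 * removes it separately).  Afterwards ->pos is renumbered for the whole
 * chain so that ties in priority can be broken by insertion order.
 */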
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) struct xfrm_policy *policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) struct xfrm_policy *pol, *delpol = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) struct hlist_node *newpos = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) hlist_for_each_entry(pol, chain, bydst_inexact_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) if (pol->type == policy->type &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) pol->if_id == policy->if_id &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) !selector_cmp(&pol->selector, &policy->selector) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) xfrm_policy_mark_match(&policy->mark, pol) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) xfrm_sec_ctx_match(pol->security, policy->security) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) !WARN_ON(delpol)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) delpol = pol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) if (policy->priority > pol->priority)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) } else if (policy->priority >= pol->priority) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) newpos = &pol->bydst_inexact_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) if (delpol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) if (newpos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) hlist_add_behind_rcu(&policy->bydst_inexact_list, newpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) hlist_add_head_rcu(&policy->bydst_inexact_list, chain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) hlist_for_each_entry(pol, chain, bydst_inexact_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) pol->pos = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
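/* Editor's note (added): insert @policy into @chain ordered by priority.
 * If a policy with the same selector, mark and security context is already
 * present, return it so the caller can remove it, or ERR_PTR(-EEXIST) when
 * @excl is set.
 */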
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) struct xfrm_policy *policy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) bool excl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) struct xfrm_policy *pol, *newpos = NULL, *delpol = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) hlist_for_each_entry(pol, chain, bydst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) if (pol->type == policy->type &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) pol->if_id == policy->if_id &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) !selector_cmp(&pol->selector, &policy->selector) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) xfrm_policy_mark_match(&policy->mark, pol) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) xfrm_sec_ctx_match(pol->security, policy->security) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) !WARN_ON(delpol)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) if (excl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) return ERR_PTR(-EEXIST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) delpol = pol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) if (policy->priority > pol->priority)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) } else if (policy->priority >= pol->priority) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) newpos = pol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) if (delpol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) if (newpos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) hlist_add_behind_rcu(&policy->bydst, &newpos->bydst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) hlist_add_head_rcu(&policy->bydst, chain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) return delpol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)
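/* Editor's note (added): insert @policy for direction @dir: place it in the
 * exact bydst hash chain when its selector is specific enough, otherwise in
 * the inexact store.  A replaced duplicate passes on its index and queued
 * packets and is killed after the lock is dropped; the routing genid is
 * bumped so cached routes are invalidated.
 */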
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) struct net *net = xp_net(policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) struct xfrm_policy *delpol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) struct hlist_head *chain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) spin_lock_bh(&net->xfrm.xfrm_policy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) if (chain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) delpol = xfrm_policy_insert_list(chain, policy, excl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) delpol = xfrm_policy_inexact_insert(policy, dir, excl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) if (IS_ERR(delpol)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) return PTR_ERR(delpol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) __xfrm_policy_link(policy, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) /* After the previous checks, family can only be AF_INET or AF_INET6 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) if (policy->family == AF_INET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) rt_genid_bump_ipv4(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) rt_genid_bump_ipv6(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) if (delpol) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) xfrm_policy_requeue(delpol, policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) __xfrm_policy_unlink(delpol, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) policy->curlft.add_time = ktime_get_real_seconds();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) policy->curlft.use_time = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) if (!mod_timer(&policy->timer, jiffies + HZ))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) xfrm_pol_hold(policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) if (delpol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) xfrm_policy_kill(delpol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) else if (xfrm_bydst_should_resize(net, dir, NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) schedule_work(&net->xfrm.policy_hash_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) EXPORT_SYMBOL(xfrm_policy_insert);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) static struct xfrm_policy *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) __xfrm_policy_bysel_ctx(struct hlist_head *chain, const struct xfrm_mark *mark,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) u32 if_id, u8 type, int dir, struct xfrm_selector *sel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) struct xfrm_sec_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) struct xfrm_policy *pol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) if (!chain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) hlist_for_each_entry(pol, chain, bydst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) if (pol->type == type &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) pol->if_id == if_id &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) xfrm_policy_mark_match(mark, pol) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) !selector_cmp(sel, &pol->selector) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) xfrm_sec_ctx_match(ctx, pol->security))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) return pol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)
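/* Look up a policy by selector and security context, taking a reference on
 * the result.  The exact bydst hash is tried first; selectors that live in
 * the inexact lists fall back to the inexact bin and its candidate chains,
 * where the matching entry with the lowest ->pos wins.  With @delete set the
 * policy is also unlinked (subject to the LSM delete check, reported via
 * @err) and killed, and the inexact bin gets a chance to be pruned.
 */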
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) struct xfrm_policy *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) xfrm_policy_bysel_ctx(struct net *net, const struct xfrm_mark *mark, u32 if_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) u8 type, int dir, struct xfrm_selector *sel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) struct xfrm_sec_ctx *ctx, int delete, int *err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) struct xfrm_pol_inexact_bin *bin = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) struct xfrm_policy *pol, *ret = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) struct hlist_head *chain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) *err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) spin_lock_bh(&net->xfrm.xfrm_policy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) chain = policy_hash_bysel(net, sel, sel->family, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) if (!chain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) struct xfrm_pol_inexact_candidates cand;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) bin = xfrm_policy_inexact_lookup(net, type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) sel->family, dir, if_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) if (!bin) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) if (!xfrm_policy_find_inexact_candidates(&cand, bin,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) &sel->saddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) &sel->daddr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) pol = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) for (i = 0; i < ARRAY_SIZE(cand.res); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) struct xfrm_policy *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) tmp = __xfrm_policy_bysel_ctx(cand.res[i], mark,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) if_id, type, dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) sel, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) if (!tmp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) if (!pol || tmp->pos < pol->pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) pol = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) pol = __xfrm_policy_bysel_ctx(chain, mark, if_id, type, dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) sel, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) if (pol) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) xfrm_pol_hold(pol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) if (delete) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) *err = security_xfrm_policy_delete(pol->security);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) if (*err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) return pol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) __xfrm_policy_unlink(pol, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) ret = pol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) if (ret && delete)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) xfrm_policy_kill(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) if (bin && delete)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) xfrm_policy_inexact_prune_bin(bin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715)
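/* Look up a policy by its index in the byidx hash, also matching type, mark
 * and if_id.  The direction encoded in @id must agree with @dir.  Takes a
 * reference on the result and, with @delete set, unlinks and kills it.
 */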
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) struct xfrm_policy *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) xfrm_policy_byid(struct net *net, const struct xfrm_mark *mark, u32 if_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) u8 type, int dir, u32 id, int delete, int *err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) struct xfrm_policy *pol, *ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) struct hlist_head *chain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) *err = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) if (xfrm_policy_id2dir(id) != dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) *err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) spin_lock_bh(&net->xfrm.xfrm_policy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) chain = net->xfrm.policy_byidx + idx_hash(net, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) ret = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) hlist_for_each_entry(pol, chain, byidx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) if (pol->type == type && pol->index == id &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) pol->if_id == if_id && xfrm_policy_mark_match(mark, pol)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) xfrm_pol_hold(pol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) if (delete) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) *err = security_xfrm_policy_delete(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) pol->security);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) if (*err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) return pol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) __xfrm_policy_unlink(pol, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) ret = pol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) if (ret && delete)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) xfrm_policy_kill(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) EXPORT_SYMBOL(xfrm_policy_byid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755)
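/* Before flushing, ask the LSM whether every policy that would be removed
 * may actually be deleted; any refusal aborts the flush.  Without
 * CONFIG_SECURITY_NETWORK_XFRM this check is a no-op.
 */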
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) #ifdef CONFIG_SECURITY_NETWORK_XFRM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) static inline int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) struct xfrm_policy *pol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) if (pol->walk.dead ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) xfrm_policy_id2dir(pol->index) >= XFRM_POLICY_MAX ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) pol->type != type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) err = security_xfrm_policy_delete(pol->security);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) xfrm_audit_policy_delete(pol, 0, task_valid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) static inline int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784)
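/* Unlink and destroy every policy of the given type.  The list walk restarts
 * from the head after each policy is killed, because the policy lock must be
 * dropped around xfrm_policy_kill().  Returns -ESRCH if nothing was flushed.
 */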
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) int dir, err = 0, cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) struct xfrm_policy *pol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) spin_lock_bh(&net->xfrm.xfrm_policy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) err = xfrm_policy_flush_secctx_check(net, type, task_valid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) dir = xfrm_policy_id2dir(pol->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) if (pol->walk.dead ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) dir >= XFRM_POLICY_MAX ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) pol->type != type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) __xfrm_policy_unlink(pol, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) xfrm_audit_policy_delete(pol, 1, task_valid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) xfrm_policy_kill(pol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) spin_lock_bh(&net->xfrm.xfrm_policy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) if (cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) __xfrm_policy_inexact_flush(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) err = -ESRCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) EXPORT_SYMBOL(xfrm_policy_flush);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821)
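/* Resumable iteration over all policies of the requested type.  The caller's
 * walk entry is linked into net->xfrm.policy_all as a cursor, so a dump can
 * be continued across several calls; a non-zero return from @func stops the
 * walk with the cursor positioned so that the next call resumes at the entry
 * that stopped it.
 *
 * Roughly how the netlink dump code uses it (sketch, details omitted):
 *
 *	xfrm_policy_walk_init(&walk, XFRM_POLICY_TYPE_ANY);
 *	err = xfrm_policy_walk(net, &walk, dump_one_policy, data);
 *	...
 *	xfrm_policy_walk_done(&walk, net);
 */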
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) int (*func)(struct xfrm_policy *, int, int, void*),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) struct xfrm_policy *pol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) struct xfrm_policy_walk_entry *x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) if (walk->type >= XFRM_POLICY_TYPE_MAX &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) walk->type != XFRM_POLICY_TYPE_ANY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) if (list_empty(&walk->walk.all) && walk->seq != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) spin_lock_bh(&net->xfrm.xfrm_policy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) if (list_empty(&walk->walk.all))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) x = list_first_entry(&walk->walk.all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) struct xfrm_policy_walk_entry, all);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) if (x->dead)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) pol = container_of(x, struct xfrm_policy, walk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) if (walk->type != XFRM_POLICY_TYPE_ANY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) walk->type != pol->type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) error = func(pol, xfrm_policy_id2dir(pol->index),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) walk->seq, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) list_move_tail(&walk->walk.all, &x->all);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) walk->seq++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) if (walk->seq == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) error = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) list_del_init(&walk->walk.all);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) EXPORT_SYMBOL(xfrm_policy_walk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) INIT_LIST_HEAD(&walk->walk.all);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) walk->walk.dead = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) walk->type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) walk->seq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) EXPORT_SYMBOL(xfrm_policy_walk_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) if (list_empty(&walk->walk.all))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) spin_lock_bh(&net->xfrm.xfrm_policy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) list_del(&walk->walk.all);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) EXPORT_SYMBOL(xfrm_policy_walk_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) * Check whether a single policy applies to this flow.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) * Returns 0 on a match, -ESRCH if not, or another -errno from the LSM hook.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) static int xfrm_policy_match(const struct xfrm_policy *pol,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) const struct flowi *fl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) u8 type, u16 family, int dir, u32 if_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) const struct xfrm_selector *sel = &pol->selector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) int ret = -ESRCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) bool match;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) if (pol->family != family ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) pol->if_id != if_id ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) pol->type != type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) match = xfrm_selector_match(sel, fl, family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) if (match)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915)
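/* Lockless descent of an inexact-policy rbtree: find the node whose subnet
 * (node->addr/node->prefixlen) contains @addr.  The walk runs under RCU; if
 * it ends without a match while the bin's seqcount shows the tree changed in
 * the meantime, the lookup is retried.
 */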
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) static struct xfrm_pol_inexact_node *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) xfrm_policy_lookup_inexact_addr(const struct rb_root *r,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) seqcount_spinlock_t *count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) const xfrm_address_t *addr, u16 family)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) const struct rb_node *parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) int seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) seq = read_seqcount_begin(count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) parent = rcu_dereference_raw(r->rb_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) while (parent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) struct xfrm_pol_inexact_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) int delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) node = rb_entry(parent, struct xfrm_pol_inexact_node, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) delta = xfrm_policy_addr_delta(addr, &node->addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) node->prefixlen, family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) if (delta < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) parent = rcu_dereference_raw(parent->rb_left);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) } else if (delta > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) parent = rcu_dereference_raw(parent->rb_right);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) return node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) if (read_seqcount_retry(count, seq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952)
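/* Collect the hash chains that may hold inexact policies matching this
 * saddr/daddr pair: the bin's "any" list plus the per-daddr, per-saddr and
 * daddr+saddr node lists.  Returns false only if there is no bin.
 */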
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) struct xfrm_pol_inexact_bin *b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) const xfrm_address_t *saddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) const xfrm_address_t *daddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) struct xfrm_pol_inexact_node *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) u16 family;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) if (!b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) family = b->k.family;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) memset(cand, 0, sizeof(*cand));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) cand->res[XFRM_POL_CAND_ANY] = &b->hhead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) n = xfrm_policy_lookup_inexact_addr(&b->root_d, &b->count, daddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) if (n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) cand->res[XFRM_POL_CAND_DADDR] = &n->hhead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) n = xfrm_policy_lookup_inexact_addr(&n->root, &b->count, saddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) if (n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) cand->res[XFRM_POL_CAND_BOTH] = &n->hhead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) n = xfrm_policy_lookup_inexact_addr(&b->root_s, &b->count, saddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) if (n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) cand->res[XFRM_POL_CAND_SADDR] = &n->hhead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986)
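/* Find the inexact-policy bin for (net, type, family, dir, if_id) in the
 * global rhashtable.  Caller must hold rcu_read_lock().
 */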
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) static struct xfrm_pol_inexact_bin *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) xfrm_policy_inexact_lookup_rcu(struct net *net, u8 type, u16 family,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) u8 dir, u32 if_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) struct xfrm_pol_inexact_key k = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) .family = family,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) .type = type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) .dir = dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) .if_id = if_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) write_pnet(&k.net, net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) return rhashtable_lookup(&xfrm_policy_inexact_table, &k,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) xfrm_pol_inexact_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) static struct xfrm_pol_inexact_bin *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) u8 dir, u32 if_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) struct xfrm_pol_inexact_bin *bin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) return bin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018)
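/* Scan one candidate chain for a policy that beats @prefer.  Chains are kept
 * ordered by priority, so the scan stops once the priority exceeds the
 * current best; on a priority tie the entry with the smaller ->pos wins.
 * Returns the winner, NULL, or an ERR_PTR from the security hook.
 */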
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) static struct xfrm_policy *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) __xfrm_policy_eval_candidates(struct hlist_head *chain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) struct xfrm_policy *prefer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) const struct flowi *fl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) u8 type, u16 family, int dir, u32 if_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) u32 priority = prefer ? prefer->priority : ~0u;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) struct xfrm_policy *pol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) if (!chain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) hlist_for_each_entry_rcu(pol, chain, bydst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) if (pol->priority > priority)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) if (err != -ESRCH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) if (prefer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) /* matches. Is it older than *prefer? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) if (pol->priority == priority &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) prefer->pos < pol->pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) return prefer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) return pol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) static struct xfrm_policy *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) xfrm_policy_eval_candidates(struct xfrm_pol_inexact_candidates *cand,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) struct xfrm_policy *prefer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) const struct flowi *fl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) u8 type, u16 family, int dir, u32 if_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) struct xfrm_policy *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) for (i = 0; i < ARRAY_SIZE(cand->res); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) tmp = __xfrm_policy_eval_candidates(cand->res[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) prefer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) fl, type, family, dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) if_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) if (!tmp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) if (IS_ERR(tmp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) return tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) prefer = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) return prefer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082)
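/* Core policy lookup for a flow: search the exact bydst hash first, then the
 * inexact candidate chains, keeping the best match (lowest priority value,
 * then lowest ->pos).  The lookup runs entirely under RCU and is retried if
 * the hash tables were resized in the meantime or if the chosen policy's
 * refcount could not be taken.
 */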
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) const struct flowi *fl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) u16 family, u8 dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) u32 if_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) struct xfrm_pol_inexact_candidates cand;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) const xfrm_address_t *daddr, *saddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) struct xfrm_pol_inexact_bin *bin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) struct xfrm_policy *pol, *ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) struct hlist_head *chain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) unsigned int sequence;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) daddr = xfrm_flowi_daddr(fl, family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) saddr = xfrm_flowi_saddr(fl, family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) if (unlikely(!daddr || !saddr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) sequence = read_seqcount_begin(&xfrm_policy_hash_generation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) chain = policy_hash_direct(net, daddr, saddr, family, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) } while (read_seqcount_retry(&xfrm_policy_hash_generation, sequence));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) ret = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) hlist_for_each_entry_rcu(pol, chain, bydst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) if (err == -ESRCH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) ret = ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) ret = pol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) if (!bin || !xfrm_policy_find_inexact_candidates(&cand, bin, saddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) daddr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) goto skip_inexact;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) pol = xfrm_policy_eval_candidates(&cand, ret, fl, type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) family, dir, if_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) if (pol) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) ret = pol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) if (IS_ERR(pol))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) skip_inexact:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) if (ret && !xfrm_pol_hold_rcu(ret))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) static struct xfrm_policy *xfrm_policy_lookup(struct net *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) const struct flowi *fl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) u16 family, u8 dir, u32 if_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) #ifdef CONFIG_XFRM_SUB_POLICY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) struct xfrm_policy *pol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) dir, if_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) if (pol != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) return pol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) dir, if_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163)
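/* Check the per-socket policy for this direction under RCU.  The policy must
 * match the flow's family, selector, mark and if_id and pass the LSM lookup.
 * Returns the policy with a reference held, NULL, or an ERR_PTR.
 */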
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) const struct flowi *fl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) u16 family, u32 if_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) struct xfrm_policy *pol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) pol = rcu_dereference(sk->sk_policy[dir]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) if (pol != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) bool match;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) if (pol->family != family) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) pol = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) match = xfrm_selector_match(&pol->selector, fl, family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) if (match) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) if ((sk->sk_mark & pol->mark.m) != pol->mark.v ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) pol->if_id != if_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) pol = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) err = security_xfrm_policy_lookup(pol->security,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) fl->flowi_secid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) if (!xfrm_pol_hold_rcu(pol))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) } else if (err == -ESRCH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) pol = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) pol = ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) pol = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) return pol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207)
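/* Link/unlink a policy on the global walk list and adjust the per-direction
 * count.  Unlinking also drops the policy from the hash tables, while hash
 * insertion is handled separately by the callers; socket policies are never
 * hashed at all.
 */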
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) struct net *net = xp_net(pol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) list_add(&pol->walk.all, &net->xfrm.policy_all);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) net->xfrm.policy_count[dir]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) xfrm_pol_hold(pol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) int dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) struct net *net = xp_net(pol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) if (list_empty(&pol->walk.all))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) /* Socket policies are not hashed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) if (!hlist_unhashed(&pol->bydst)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) hlist_del_rcu(&pol->bydst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) hlist_del_init(&pol->bydst_inexact_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) hlist_del(&pol->byidx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) list_del_init(&pol->walk.all);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) net->xfrm.policy_count[dir]--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) return pol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) static void xfrm_sk_policy_link(struct xfrm_policy *pol, int dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) __xfrm_policy_link(pol, XFRM_POLICY_MAX + dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) static void xfrm_sk_policy_unlink(struct xfrm_policy *pol, int dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) __xfrm_policy_unlink(pol, XFRM_POLICY_MAX + dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) struct net *net = xp_net(pol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) spin_lock_bh(&net->xfrm.xfrm_policy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) pol = __xfrm_policy_unlink(pol, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) if (pol) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) xfrm_policy_kill(pol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) EXPORT_SYMBOL(xfrm_policy_delete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262)
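/* Install @pol as the socket policy for one direction (or clear it when @pol
 * is NULL).  When replacing, packets queued on the old policy are moved over
 * to the new one before the old policy is killed.
 */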
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) struct net *net = sock_net(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) struct xfrm_policy *old_pol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) #ifdef CONFIG_XFRM_SUB_POLICY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) spin_lock_bh(&net->xfrm.xfrm_policy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) old_pol = rcu_dereference_protected(sk->sk_policy[dir],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) lockdep_is_held(&net->xfrm.xfrm_policy_lock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) if (pol) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) pol->curlft.add_time = ktime_get_real_seconds();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) xfrm_sk_policy_link(pol, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) rcu_assign_pointer(sk->sk_policy[dir], pol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) if (old_pol) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) if (pol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) xfrm_policy_requeue(old_pol, pol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) /* Unlinking always succeeds.  This is the only function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) * allowed to delete or replace a socket policy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) xfrm_sk_policy_unlink(old_pol, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) if (old_pol) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) xfrm_policy_kill(old_pol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298)
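/* Duplicate a socket policy for a child socket: copy the selector, lifetimes,
 * mark, templates and security context, and link the copy as a socket policy.
 * Returns NULL on allocation or LSM failure.
 */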
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) struct net *net = xp_net(old);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) if (newp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) newp->selector = old->selector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) if (security_xfrm_policy_clone(old->security,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) &newp->security)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) kfree(newp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) return NULL; /* ENOMEM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) newp->lft = old->lft;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) newp->curlft = old->curlft;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) newp->mark = old->mark;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) newp->if_id = old->if_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) newp->action = old->action;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) newp->flags = old->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) newp->xfrm_nr = old->xfrm_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) newp->index = old->index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) newp->type = old->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) newp->family = old->family;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) memcpy(newp->xfrm_vec, old->xfrm_vec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) newp->xfrm_nr*sizeof(struct xfrm_tmpl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) spin_lock_bh(&net->xfrm.xfrm_policy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) xfrm_sk_policy_link(newp, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) xfrm_pol_put(newp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) return newp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330)
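/* Clone both directions of the parent socket's policies onto a freshly cloned
 * socket (e.g. when a connection is accepted).  Returns -ENOMEM if any clone
 * fails.
 */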
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) const struct xfrm_policy *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) struct xfrm_policy *np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) int i, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) for (i = 0; i < 2; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) p = rcu_dereference(osk->sk_policy[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) if (p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) np = clone_policy(p, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) if (unlikely(!np)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) rcu_assign_pointer(sk->sk_policy[i], np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352)
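/* Ask the address family's afinfo module for a local source address to use
 * toward @remote.  The bare rcu_read_unlock() pairs with the read lock taken
 * inside xfrm_policy_get_afinfo().
 */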
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) xfrm_address_t *remote, unsigned short family, u32 mark)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) if (unlikely(afinfo == NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) err = afinfo->get_saddr(net, oif, local, remote, mark);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) /* Resolve the policy's list of templates for the flow into a list of states. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) struct xfrm_state **xfrm, unsigned short family)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) struct net *net = xp_net(policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) int nx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) int i, error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) xfrm_address_t tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) struct xfrm_state *x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) xfrm_address_t *remote = daddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) xfrm_address_t *local = saddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) if (tmpl->mode == XFRM_MODE_TUNNEL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) tmpl->mode == XFRM_MODE_BEET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) remote = &tmpl->id.daddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) local = &tmpl->saddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) if (xfrm_addr_any(local, tmpl->encap_family)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) error = xfrm_get_saddr(net, fl->flowi_oif,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) &tmp, remote,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) tmpl->encap_family, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) local = &tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) x = xfrm_state_find(remote, local, fl, tmpl, policy, &error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) family, policy->if_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) if (x && x->km.state == XFRM_STATE_VALID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) xfrm[nx++] = x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) daddr = remote;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) saddr = local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) if (x) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) error = (x->km.state == XFRM_STATE_ERROR ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) -EINVAL : -EAGAIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) xfrm_state_put(x);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) } else if (error == -ESRCH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) error = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) if (!tmpl->optional)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) return nx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) for (nx--; nx >= 0; nx--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) xfrm_state_put(xfrm[nx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427)
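/* Resolve templates across all @npols policies, concatenating the states into
 * @xfrm.  When more than one policy is involved the states are re-sorted into
 * proper outbound nesting order.  Returns the number of states, or a negative
 * errno with any already-resolved states released.
 */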
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) struct xfrm_state **xfrm, unsigned short family)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) struct xfrm_state *tp[XFRM_MAX_DEPTH];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) int cnx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) for (i = 0; i < npols; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) error = -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) error = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) cnx += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) /* found states are sorted for outbound processing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) if (npols > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) xfrm_state_sort(xfrm, tpp, cnx, family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) return cnx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) for (cnx--; cnx >= 0; cnx--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) xfrm_state_put(tpp[cnx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) static int xfrm_get_tos(const struct flowi *fl, int family)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) if (family == AF_INET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) return IPTOS_RT_MASK & fl->u.ip4.flowi4_tos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473)
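/* Allocate an xfrm_dst from the per-family xfrm dst_ops and zero everything
 * past the embedded dst_entry.  Returns ERR_PTR(-EINVAL) for an unsupported
 * family and ERR_PTR(-ENOBUFS) if the allocation fails; the rcu_read_unlock()
 * pairs with the lock taken by xfrm_policy_get_afinfo().
 */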
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) struct dst_ops *dst_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) struct xfrm_dst *xdst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) if (!afinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) switch (family) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) case AF_INET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) dst_ops = &net->xfrm.xfrm4_dst_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) #if IS_ENABLED(CONFIG_IPV6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) case AF_INET6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) dst_ops = &net->xfrm.xfrm6_dst_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) xdst = dst_alloc(dst_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) if (likely(xdst)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) struct dst_entry *dst = &xdst->u.dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) xdst = ERR_PTR(-ENOBUFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) return xdst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508)
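/* Descriptive note (added): remember the route cookie and the
 * non-fragmentable header length on an IPv6 path entry; IPv4 paths need
 * no extra initialisation.
 */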
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) static void xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) int nfheader_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) if (dst->ops->family == AF_INET6) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) struct rt6_info *rt = (struct rt6_info *)dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) path->path_cookie = rt6_get_cookie(rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) path->u.rt6.rt6i_nfheader_len = nfheader_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518)
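/* Descriptive note (added): let the address family fill in its specific
 * parts of @xdst for @dev via the afinfo ->fill_dst() callback.
 */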
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) const struct flowi *fl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) const struct xfrm_policy_afinfo *afinfo =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) if (!afinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) err = afinfo->fill_dst(xdst, dev, fl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) /* Allocate a chain of dst_entry's, attach the known xfrm's, calculate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538)  * all the metrics... In short, bundle a bundle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) struct xfrm_state **xfrm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) struct xfrm_dst **bundle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) int nx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) const struct flowi *fl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) struct dst_entry *dst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) const struct xfrm_state_afinfo *afinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) const struct xfrm_mode *inner_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) struct net *net = xp_net(policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) unsigned long now = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) struct xfrm_dst *xdst_prev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) struct xfrm_dst *xdst0 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) int header_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) int nfheader_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) int trailer_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) int tos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) int family = policy->selector.family;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) xfrm_address_t saddr, daddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) tos = xfrm_get_tos(fl, family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) dst_hold(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) for (; i < nx; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) struct dst_entry *dst1 = &xdst->u.dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) err = PTR_ERR(xdst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) if (IS_ERR(xdst)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) dst_release(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) goto put_states;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) bundle[i] = xdst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) if (!xdst_prev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) xdst0 = xdst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) /* The ref count is taken during xfrm_alloc_dst();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585)  * no need to do dst_clone() on dst1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) xfrm_dst_set_child(xdst_prev, &xdst->u.dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) if (xfrm[i]->sel.family == AF_UNSPEC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) inner_mode = xfrm_ip2inner_mode(xfrm[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) xfrm_af2proto(family));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) if (!inner_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) err = -EAFNOSUPPORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) dst_release(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) goto put_states;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) inner_mode = &xfrm[i]->inner_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) xdst->route = dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) dst_copy_metrics(dst1, dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) __u32 mark = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) family = xfrm[i]->props.family;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) &saddr, &daddr, family, mark);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) err = PTR_ERR(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) if (IS_ERR(dst))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) goto put_states;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) dst_hold(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) dst1->xfrm = xfrm[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) xdst->xfrm_genid = xfrm[i]->genid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) dst1->lastuse = now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) dst1->input = dst_discard;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) afinfo = xfrm_state_afinfo_get_rcu(inner_mode->family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) if (likely(afinfo))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) dst1->output = afinfo->output;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) dst1->output = dst_discard_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) xdst_prev = xdst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) header_len += xfrm[i]->props.header_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) nfheader_len += xfrm[i]->props.header_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) trailer_len += xfrm[i]->props.trailer_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) xfrm_dst_set_child(xdst_prev, dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) xdst0->path = dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) dev = dst->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) goto free_dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) xfrm_init_path(xdst0, dst, nfheader_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) xfrm_init_pmtu(bundle, nx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) for (xdst_prev = xdst0; xdst_prev != (struct xfrm_dst *)dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) xdst_prev = (struct xfrm_dst *) xfrm_dst_child(&xdst_prev->u.dst)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) err = xfrm_fill_dst(xdst_prev, dev, fl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) goto free_dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) xdst_prev->u.dst.header_len = header_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) xdst_prev->u.dst.trailer_len = trailer_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) header_len -= xdst_prev->u.dst.xfrm->props.header_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) return &xdst0->u.dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) put_states:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) for (; i < nx; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) xfrm_state_put(xfrm[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) free_dst:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) if (xdst0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) dst_release_immediate(&xdst0->u.dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676)
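/* Descriptive note (added): given the policy already looked up in pols[0],
 * add the matching main policy when sub-policies are in use and count the
 * policies and templates the bundle will need.  *num_xfrms is set to -1
 * when any of the policies does not allow the flow.
 */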
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) static int xfrm_expand_policies(const struct flowi *fl, u16 family,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) struct xfrm_policy **pols,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) int *num_pols, int *num_xfrms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) if (*num_pols == 0 || !pols[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) *num_pols = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) *num_xfrms = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) if (IS_ERR(pols[0]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) return PTR_ERR(pols[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) *num_xfrms = pols[0]->xfrm_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) #ifdef CONFIG_XFRM_SUB_POLICY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) XFRM_POLICY_TYPE_MAIN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) fl, family,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) XFRM_POLICY_OUT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) pols[0]->if_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) if (pols[1]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) if (IS_ERR(pols[1])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) xfrm_pols_put(pols, *num_pols);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) return PTR_ERR(pols[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) (*num_pols)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) (*num_xfrms) += pols[1]->xfrm_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) for (i = 0; i < *num_pols; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) if (pols[i]->action != XFRM_POLICY_ALLOW) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) *num_xfrms = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721)
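/* Descriptive note (added): resolve the states for the given policies and
 * build a bundle of xfrm_dst entries on top of @dst_orig.  Returns NULL
 * when template resolution yields no states, the new bundle on success,
 * or an ERR_PTR() on failure.
 */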
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) static struct xfrm_dst *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) const struct flowi *fl, u16 family,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) struct dst_entry *dst_orig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) struct net *net = xp_net(pols[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) struct xfrm_dst *xdst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) struct dst_entry *dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) /* Try to instantiate a bundle */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) if (err <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) if (err == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) if (err != -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) dst = xfrm_bundle_create(pols[0], xfrm, bundle, err, fl, dst_orig);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) if (IS_ERR(dst)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) return ERR_CAST(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) xdst = (struct xfrm_dst *)dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) xdst->num_xfrms = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) xdst->num_pols = num_pols;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) xdst->policy_genid = atomic_read(&pols[0]->genid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) return xdst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759)
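/* Descriptive note (added): hold-queue timer handler.  Redo the bundle
 * lookup for the head of the queue; if it still resolves to a queueing
 * bundle, double the timeout (bounded by XFRM_QUEUE_TMO_MAX) and re-arm
 * the timer.  Once a usable route exists, flush the queue and transmit
 * the pending packets.
 */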
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) static void xfrm_policy_queue_process(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) struct sock *sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) struct dst_entry *dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) struct xfrm_policy *pol = from_timer(pol, t, polq.hold_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) struct net *net = xp_net(pol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) struct xfrm_policy_queue *pq = &pol->polq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) struct flowi fl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) struct sk_buff_head list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) __u32 skb_mark;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) spin_lock(&pq->hold_queue.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) skb = skb_peek(&pq->hold_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) spin_unlock(&pq->hold_queue.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) dst = skb_dst(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) sk = skb->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) /* Fixup the mark to support VTI. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) skb_mark = skb->mark;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) skb->mark = pol->mark.v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) xfrm_decode_session(skb, &fl, dst->ops->family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) skb->mark = skb_mark;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) spin_unlock(&pq->hold_queue.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) dst_hold(xfrm_dst_path(dst));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, XFRM_LOOKUP_QUEUE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) if (IS_ERR(dst))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) goto purge_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) if (dst->flags & DST_XFRM_QUEUE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) dst_release(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) goto purge_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) pq->timeout = pq->timeout << 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) xfrm_pol_hold(pol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) dst_release(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) __skb_queue_head_init(&list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) spin_lock(&pq->hold_queue.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) pq->timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) skb_queue_splice_init(&pq->hold_queue, &list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) spin_unlock(&pq->hold_queue.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) while (!skb_queue_empty(&list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) skb = __skb_dequeue(&list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) /* Fixup the mark to support VTI. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) skb_mark = skb->mark;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) skb->mark = pol->mark.v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) skb->mark = skb_mark;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) dst_hold(xfrm_dst_path(skb_dst(skb)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) dst = xfrm_lookup(net, xfrm_dst_path(skb_dst(skb)), &fl, skb->sk, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) if (IS_ERR(dst)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) nf_reset_ct(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) skb_dst_drop(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) skb_dst_set(skb, dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) dst_output(net, skb->sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) xfrm_pol_put(pol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) purge_queue:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) pq->timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) skb_queue_purge(&pq->hold_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) xfrm_pol_put(pol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846)
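/* Descriptive note (added): output handler of a queueing bundle.  Instead
 * of transmitting, park the skb on the policy hold queue and (re)arm the
 * hold timer.
 */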
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) unsigned long sched_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) struct dst_entry *dst = skb_dst(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) struct xfrm_policy *pol = xdst->pols[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) struct xfrm_policy_queue *pq = &pol->polq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) if (unlikely(skb_fclone_busy(sk, skb))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) skb_dst_force(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) spin_lock_bh(&pq->hold_queue.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) if (!pq->timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) pq->timeout = XFRM_QUEUE_TMO_MIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) sched_next = jiffies + pq->timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) if (del_timer(&pq->hold_timer)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) if (time_before(pq->hold_timer.expires, sched_next))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) sched_next = pq->hold_timer.expires;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) xfrm_pol_put(pol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) __skb_queue_tail(&pq->hold_queue, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) if (!mod_timer(&pq->hold_timer, sched_next))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) xfrm_pol_hold(pol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) spin_unlock_bh(&pq->hold_queue.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888)
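/* Descriptive note (added): build a dummy bundle for the case where
 * policies matched but no usable states exist.  When packets should be
 * queued until the SAs are negotiated, the bundle is marked
 * DST_XFRM_QUEUE and its output handler feeds the policy hold queue.
 */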
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) struct xfrm_flo *xflo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) const struct flowi *fl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) int num_xfrms,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) u16 family)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) struct dst_entry *dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) struct dst_entry *dst1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) struct xfrm_dst *xdst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) xdst = xfrm_alloc_dst(net, family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) if (IS_ERR(xdst))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) return xdst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) net->xfrm.sysctl_larval_drop ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) num_xfrms <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) return xdst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) dst = xflo->dst_orig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) dst1 = &xdst->u.dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) dst_hold(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) xdst->route = dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) dst_copy_metrics(dst1, dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) dst1->flags |= DST_XFRM_QUEUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) dst1->lastuse = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) dst1->input = dst_discard;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) dst1->output = xdst_queue_output;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) dst_hold(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) xfrm_dst_set_child(xdst, dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) xdst->path = dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) dev = dst->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) goto free_dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) err = xfrm_fill_dst(xdst, dev, fl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) goto free_dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) return xdst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) free_dst:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) dst_release(dst1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) xdst = ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947)
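/* Descriptive note (added): look up the policies matching @fl and resolve
 * them into a bundle.  When policies exist but no bundle can be
 * instantiated yet, a dummy bundle is returned instead.
 */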
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) static struct xfrm_dst *xfrm_bundle_lookup(struct net *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) const struct flowi *fl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) u16 family, u8 dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) struct xfrm_flo *xflo, u32 if_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) int num_pols = 0, num_xfrms = 0, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) struct xfrm_dst *xdst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) /* Resolve the policies to use if we couldn't get them from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958)  * a previous cache entry. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) num_pols = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) pols[0] = xfrm_policy_lookup(net, fl, family, dir, if_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) err = xfrm_expand_policies(fl, family, pols,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) &num_pols, &num_xfrms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) goto inc_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) if (num_pols == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) if (num_xfrms <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) goto make_dummy_bundle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) xflo->dst_orig);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) if (IS_ERR(xdst)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) err = PTR_ERR(xdst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) if (err == -EREMOTE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) xfrm_pols_put(pols, num_pols);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) if (err != -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) goto make_dummy_bundle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) } else if (xdst == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) num_xfrms = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) goto make_dummy_bundle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) return xdst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) make_dummy_bundle:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) /* We found policies, but there are no bundles to instantiate:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991)  * either the policy blocks, has no transformations, or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992)  * we could not build a template (no xfrm_states). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) if (IS_ERR(xdst)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) xfrm_pols_put(pols, num_pols);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) return ERR_CAST(xdst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) xdst->num_pols = num_pols;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) xdst->num_xfrms = num_xfrms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) return xdst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) inc_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) xfrm_pols_put(pols, num_pols);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010)
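/* Descriptive note (added): exchange @dst_orig for the address family's
 * blackhole route.  Used by xfrm_lookup_route() when the flow needs
 * states that are not yet negotiated and larval packets are configured
 * to be dropped.
 */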
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) static struct dst_entry *make_blackhole(struct net *net, u16 family,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) struct dst_entry *dst_orig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) struct dst_entry *ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) if (!afinfo) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) dst_release(dst_orig);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) ret = afinfo->blackhole_route(net, dst_orig);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) /* Finds/creates a bundle for a given flow and if_id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) * At the moment we eat a raw IP route. Mostly to speed up lookups
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) * on interfaces with disabled IPsec.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) * xfrm_lookup uses an if_id of 0 by default, and is provided for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) * compatibility
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) struct dst_entry *dst_orig,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) const struct flowi *fl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) const struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) int flags, u32 if_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) struct xfrm_dst *xdst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) struct dst_entry *dst, *route;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) u16 family = dst_orig->ops->family;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) u8 dir = XFRM_POLICY_OUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) dst = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) xdst = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) route = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) sk = sk_const_to_full_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) num_pols = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) if_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) err = xfrm_expand_policies(fl, family, pols,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) &num_pols, &num_xfrms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) goto dropdst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) if (num_pols) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) if (num_xfrms <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) drop_pols = num_pols;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) goto no_transform;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) xdst = xfrm_resolve_and_create_bundle(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) pols, num_pols, fl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) family, dst_orig);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) if (IS_ERR(xdst)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) xfrm_pols_put(pols, num_pols);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) err = PTR_ERR(xdst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) if (err == -EREMOTE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) goto nopol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) goto dropdst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) } else if (xdst == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) num_xfrms = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) drop_pols = num_pols;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) goto no_transform;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) route = xdst->route;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) if (xdst == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) struct xfrm_flo xflo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) xflo.dst_orig = dst_orig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) xflo.flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) /* To accelerate a bit... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) if (!if_id && ((dst_orig->flags & DST_NOXFRM) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) !net->xfrm.policy_count[XFRM_POLICY_OUT]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) goto nopol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo, if_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) if (xdst == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) goto nopol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) if (IS_ERR(xdst)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) err = PTR_ERR(xdst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) goto dropdst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) num_pols = xdst->num_pols;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) num_xfrms = xdst->num_xfrms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) route = xdst->route;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) dst = &xdst->u.dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) if (route == NULL && num_xfrms > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) /* The only case in which xfrm_bundle_lookup() returns a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118)  * bundle with a null route is when the template could
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119)  * not be resolved. It means the policies are there, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120)  * the bundle could not be created, since we don't yet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121)  * have the xfrm_states. We need to wait for the KM to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122)  * negotiate new SAs or bail out with an error. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) if (net->xfrm.sysctl_larval_drop) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) err = -EREMOTE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) err = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) no_transform:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) if (num_pols == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) goto nopol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) if ((flags & XFRM_LOOKUP_ICMP) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) !(pols[0]->flags & XFRM_POLICY_ICMP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) err = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) for (i = 0; i < num_pols; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) pols[i]->curlft.use_time = ktime_get_real_seconds();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) if (num_xfrms < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) /* Prohibit the flow */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) err = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) } else if (num_xfrms > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) /* Flow transformed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) dst_release(dst_orig);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) /* Flow passes untransformed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) dst_release(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) dst = dst_orig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) ok:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) xfrm_pols_put(pols, drop_pols);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) if (dst && dst->xfrm &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) dst->flags |= DST_XFRM_TUNNEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) return dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) nopol:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) if (!(flags & XFRM_LOOKUP_ICMP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) dst = dst_orig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) goto ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) err = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) dst_release(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) dropdst:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) dst_release(dst_orig);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) xfrm_pols_put(pols, drop_pols);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) EXPORT_SYMBOL(xfrm_lookup_with_ifid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) /* Main function: finds/creates a bundle for a given flow.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) * At the moment we eat a raw IP route. Mostly to speed up lookups
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) * on interfaces with disabled IPsec.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) const struct flowi *fl, const struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) return xfrm_lookup_with_ifid(net, dst_orig, fl, sk, flags, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) EXPORT_SYMBOL(xfrm_lookup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) /* Callers of xfrm_lookup_route() must ensure a call to dst_output().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) * Otherwise we may send out blackholed packets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) const struct flowi *fl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) const struct sock *sk, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) flags | XFRM_LOOKUP_QUEUE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) XFRM_LOOKUP_KEEP_DST_REF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) if (PTR_ERR(dst) == -EREMOTE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) return make_blackhole(net, dst_orig->ops->family, dst_orig);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) if (IS_ERR(dst))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) dst_release(dst_orig);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) return dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) EXPORT_SYMBOL(xfrm_lookup_route);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217)
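/* Descriptive note (added): invoke the reject handler of the transform
 * type behind secpath entry @idx for @skb, if the entry exists and the
 * type provides one.
 */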
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) static inline int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) struct sec_path *sp = skb_sec_path(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) struct xfrm_state *x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) if (!sp || idx < 0 || idx >= sp->len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) x = sp->xvec[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) if (!x->type->reject)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) return x->type->reject(x, skb, fl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) /* When the skb is transformed back to its "native" form, we have to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233)  * check policy restrictions. At the moment we do this in a maximally
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234)  * stupid way. Shame on me. :-) Of course, connected sockets must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235)  * have the policy cached on them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) static inline int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) unsigned short family)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) if (xfrm_state_kern(x))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) return x->id.proto == tmpl->id.proto &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) (x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) (x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) x->props.mode == tmpl->mode &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) (tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) !(x->props.mode != XFRM_MODE_TRANSPORT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) xfrm_state_addr_cmp(tmpl, x, family));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255)  * 0 or more than 0 is returned when validation succeeds (either a bypass
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256)  * because of an optional transport mode, or the next index of the secpath
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257)  * state matched against the template).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258)  * -1 is returned when no matching template is found.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259)  * Otherwise "-2 - errored_index" is returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) static inline int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) unsigned short family)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) int idx = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) if (tmpl->optional) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) if (tmpl->mode == XFRM_MODE_TRANSPORT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) return start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) start = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) for (; idx < sp->len; idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) return ++idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) if (start == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) start = -2-idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) return start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283)
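/* Descriptive note (added): build a flowi4 key from an IPv4 packet for
 * policy lookup.  In reverse mode the addresses (and ports, where
 * applicable) are swapped.  Upper layer selector fields are only
 * extracted from non-fragmented packets, and ECN bits are masked out of
 * the TOS.
 */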
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) const struct iphdr *iph = ip_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) int ihl = iph->ihl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) u8 *xprth = skb_network_header(skb) + ihl * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) struct flowi4 *fl4 = &fl->u.ip4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) int oif = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) if (skb_dst(skb) && skb_dst(skb)->dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) oif = skb_dst(skb)->dev->ifindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) memset(fl4, 0, sizeof(struct flowi4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) fl4->flowi4_mark = skb->mark;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) fl4->flowi4_oif = reverse ? skb->skb_iif : oif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) fl4->flowi4_proto = iph->protocol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) fl4->daddr = reverse ? iph->saddr : iph->daddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) fl4->saddr = reverse ? iph->daddr : iph->saddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) fl4->flowi4_tos = iph->tos & ~INET_ECN_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304)
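/* For non-fragmented packets, make sure the first bytes of the transport
 * header are linear before reading them (pskb_may_pull() may reallocate
 * the head, hence xprth is re-derived in each case below), then extract
 * the per-protocol selector fields.
 */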
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) if (!ip_is_fragment(iph)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) switch (iph->protocol) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) case IPPROTO_UDP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) case IPPROTO_UDPLITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) case IPPROTO_TCP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) case IPPROTO_SCTP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) case IPPROTO_DCCP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) if (xprth + 4 < skb->data ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) pskb_may_pull(skb, xprth + 4 - skb->data)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) __be16 *ports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) xprth = skb_network_header(skb) + ihl * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) ports = (__be16 *)xprth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) fl4->fl4_sport = ports[!!reverse];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) fl4->fl4_dport = ports[!reverse];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) case IPPROTO_ICMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) if (xprth + 2 < skb->data ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) pskb_may_pull(skb, xprth + 2 - skb->data)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) u8 *icmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) xprth = skb_network_header(skb) + ihl * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) icmp = xprth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) fl4->fl4_icmp_type = icmp[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) fl4->fl4_icmp_code = icmp[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) case IPPROTO_ESP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) if (xprth + 4 < skb->data ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) pskb_may_pull(skb, xprth + 4 - skb->data)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) __be32 *ehdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) xprth = skb_network_header(skb) + ihl * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) ehdr = (__be32 *)xprth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) fl4->fl4_ipsec_spi = ehdr[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) case IPPROTO_AH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) if (xprth + 8 < skb->data ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) pskb_may_pull(skb, xprth + 8 - skb->data)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) __be32 *ah_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) xprth = skb_network_header(skb) + ihl * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) ah_hdr = (__be32 *)xprth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) fl4->fl4_ipsec_spi = ah_hdr[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) case IPPROTO_COMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) if (xprth + 4 < skb->data ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) pskb_may_pull(skb, xprth + 4 - skb->data)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) __be16 *ipcomp_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) xprth = skb_network_header(skb) + ihl * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) ipcomp_hdr = (__be16 *)xprth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) fl4->fl4_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) case IPPROTO_GRE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) if (xprth + 12 < skb->data ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) pskb_may_pull(skb, xprth + 12 - skb->data)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) __be16 *greflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) __be32 *gre_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) xprth = skb_network_header(skb) + ihl * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) greflags = (__be16 *)xprth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) gre_hdr = (__be32 *)xprth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377)
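/* The optional key field follows the 4-byte GRE base header, shifted by a
 * further 4 bytes when GRE_CSUM indicates a checksum word is present.
 */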
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) if (greflags[0] & GRE_KEY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) if (greflags[0] & GRE_CSUM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) gre_hdr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) fl4->fl4_gre_key = gre_hdr[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) fl4->fl4_ipsec_spi = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) #if IS_ENABLED(CONFIG_IPV6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) decode_session6(struct sk_buff *skb, struct flowi *fl, bool reverse)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) struct flowi6 *fl6 = &fl->u.ip6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) int onlyproto = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) const struct ipv6hdr *hdr = ipv6_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) u32 offset = sizeof(*hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) struct ipv6_opt_hdr *exthdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) const unsigned char *nh = skb_network_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) u16 nhoff = IP6CB(skb)->nhoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) int oif = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) u8 nexthdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) if (!nhoff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) nhoff = offsetof(struct ipv6hdr, nexthdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) nexthdr = nh[nhoff];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) if (skb_dst(skb) && skb_dst(skb)->dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) oif = skb_dst(skb)->dev->ifindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) memset(fl6, 0, sizeof(struct flowi6));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) fl6->flowi6_mark = skb->mark;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) fl6->flowi6_oif = reverse ? skb->skb_iif : oif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) fl6->daddr = reverse ? hdr->saddr : hdr->daddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) fl6->saddr = reverse ? hdr->daddr : hdr->saddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420)
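/* Walk the IPv6 extension-header chain. Once a fragment header is seen,
 * "onlyproto" is set so that only the final protocol number, and not
 * ports or ICMPv6/MH fields, is taken from the possibly fragmented
 * payload.
 */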
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) while (nh + offset + sizeof(*exthdr) < skb->data ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) pskb_may_pull(skb, nh + offset + sizeof(*exthdr) - skb->data)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) nh = skb_network_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) exthdr = (struct ipv6_opt_hdr *)(nh + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) switch (nexthdr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) case NEXTHDR_FRAGMENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) onlyproto = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) case NEXTHDR_ROUTING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) case NEXTHDR_HOP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) case NEXTHDR_DEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) offset += ipv6_optlen(exthdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) nexthdr = exthdr->nexthdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) exthdr = (struct ipv6_opt_hdr *)(nh + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) case IPPROTO_UDP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) case IPPROTO_UDPLITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) case IPPROTO_TCP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) case IPPROTO_SCTP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) case IPPROTO_DCCP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) if (!onlyproto && (nh + offset + 4 < skb->data ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) pskb_may_pull(skb, nh + offset + 4 - skb->data))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) __be16 *ports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) nh = skb_network_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) ports = (__be16 *)(nh + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) fl6->fl6_sport = ports[!!reverse];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) fl6->fl6_dport = ports[!reverse];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) fl6->flowi6_proto = nexthdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) case IPPROTO_ICMPV6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) if (!onlyproto && (nh + offset + 2 < skb->data ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) pskb_may_pull(skb, nh + offset + 2 - skb->data))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) u8 *icmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) nh = skb_network_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) icmp = (u8 *)(nh + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) fl6->fl6_icmp_type = icmp[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) fl6->fl6_icmp_code = icmp[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) fl6->flowi6_proto = nexthdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) case IPPROTO_GRE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) if (!onlyproto &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) (nh + offset + 12 < skb->data ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) pskb_may_pull(skb, nh + offset + 12 - skb->data))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) struct gre_base_hdr *gre_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) __be32 *gre_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) nh = skb_network_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) gre_hdr = (struct gre_base_hdr *)(nh + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) gre_key = (__be32 *)(gre_hdr + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) if (gre_hdr->flags & GRE_KEY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) if (gre_hdr->flags & GRE_CSUM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) gre_key++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) fl6->fl6_gre_key = *gre_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) fl6->flowi6_proto = nexthdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) #if IS_ENABLED(CONFIG_IPV6_MIP6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) case IPPROTO_MH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) offset += ipv6_optlen(exthdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) if (!onlyproto && (nh + offset + 3 < skb->data ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) pskb_may_pull(skb, nh + offset + 3 - skb->data))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) struct ip6_mh *mh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) nh = skb_network_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) mh = (struct ip6_mh *)(nh + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) fl6->fl6_mh_type = mh->ip6mh_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) fl6->flowi6_proto = nexthdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) /* XXX Why are these headers here? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) case IPPROTO_AH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) case IPPROTO_ESP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) case IPPROTO_COMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) fl6->fl6_ipsec_spi = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) fl6->flowi6_proto = nexthdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) unsigned int family, int reverse)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) switch (family) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) case AF_INET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) decode_session4(skb, fl, reverse);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) #if IS_ENABLED(CONFIG_IPV6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) case AF_INET6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) decode_session6(skb, fl, reverse);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) return -EAFNOSUPPORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) return security_xfrm_decode_session(skb, &fl->flowi_secid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) EXPORT_SYMBOL(__xfrm_decode_session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531)
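/* Return 1 (and report the index via *idxp) if any secpath entry at or
 * after position k is not in transport mode, 0 otherwise.
 */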
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) for (; k < sp->len; k++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) *idxp = k;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) unsigned short family)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) struct net *net = dev_net(skb->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) struct xfrm_policy *pol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) int npols = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) int xfrm_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) int pi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) int reverse;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) struct flowi fl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) int xerr_idx = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) const struct xfrm_if_cb *ifcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) struct sec_path *sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) struct xfrm_if *xi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) u32 if_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) ifcb = xfrm_if_get_cb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) if (ifcb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) xi = ifcb->decode_session(skb, family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) if (xi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) if_id = xi->p.if_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) net = xi->net;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572)
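/* "dir" encodes both the policy direction (the bits under XFRM_POLICY_MASK)
 * and, above the mask, a flag requesting reverse decoding of the flow;
 * split the two here.
 */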
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) reverse = dir & ~XFRM_POLICY_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) dir &= XFRM_POLICY_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) nf_nat_decode_session(skb, &fl, family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) /* First, check the SAs that were used against their selectors. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) sp = skb_sec_path(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) if (sp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) for (i = sp->len - 1; i >= 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) struct xfrm_state *x = sp->xvec[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) if (!xfrm_selector_match(&x->sel, &fl, family)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) pol = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) sk = sk_to_full_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) if (sk && sk->sk_policy[dir]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) pol = xfrm_sk_policy_lookup(sk, dir, &fl, family, if_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) if (IS_ERR(pol)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) if (!pol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) pol = xfrm_policy_lookup(net, &fl, family, dir, if_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) if (IS_ERR(pol)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) if (!pol) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) if (sp && secpath_has_nontransport(sp, 0, &xerr_idx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) xfrm_secpath_reject(xerr_idx, skb, &fl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) pol->curlft.use_time = ktime_get_real_seconds();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) pols[0] = pol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) npols++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) #ifdef CONFIG_XFRM_SUB_POLICY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) &fl, family,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) XFRM_POLICY_IN, if_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) if (pols[1]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) if (IS_ERR(pols[1])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) pols[1]->curlft.use_time = ktime_get_real_seconds();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) npols++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) if (pol->action == XFRM_POLICY_ALLOW) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) static struct sec_path dummy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) struct xfrm_tmpl **tpp = tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) int ti = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) int i, k;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) sp = skb_sec_path(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) if (!sp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) sp = &dummy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) for (pi = 0; pi < npols; pi++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) if (pols[pi] != pol &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) pols[pi]->action != XFRM_POLICY_ALLOW) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) goto reject;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) goto reject_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) for (i = 0; i < pols[pi]->xfrm_nr; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) tpp[ti++] = &pols[pi]->xfrm_vec[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) xfrm_nr = ti;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) if (npols > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) tpp = stp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) /* For each tunnel xfrm, find the first matching tmpl.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) * For each tmpl before that, find corresponding xfrm.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) * Order is _important_. Later we will implement
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) * some barriers, but at the moment a barrier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) * is implied between every two transformations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) k = xfrm_policy_ok(tpp[i], sp, k, family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) if (k < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) if (k < -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) /* "-2 - errored_index" returned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) xerr_idx = -(2+k);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) goto reject;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) if (secpath_has_nontransport(sp, k, &xerr_idx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) goto reject;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) xfrm_pols_put(pols, npols);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) reject:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) xfrm_secpath_reject(xerr_idx, skb, &fl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) reject_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) xfrm_pols_put(pols, npols);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) EXPORT_SYMBOL(__xfrm_policy_check);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) struct net *net = dev_net(skb->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) struct flowi fl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) struct dst_entry *dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) int res = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) if (xfrm_decode_session(skb, &fl, family) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) skb_dst_force(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) if (!skb_dst(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727)
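/* XFRM_LOOKUP_QUEUE allows xfrm_lookup() to hand back a queueing dst
 * (DST_XFRM_QUEUE) while the needed SAs are still being resolved, so that
 * forwarded packets may be held rather than the lookup failing outright.
 */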
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) if (IS_ERR(dst)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) res = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) dst = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) skb_dst_set(skb, dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) EXPORT_SYMBOL(__xfrm_route_forward);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) /* Optimize later using cookies and generation ids. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) /* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) * get validated by dst_ops->check on every use. We do this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) * because when a normal route referenced by an XFRM dst is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) * obsoleted we do not go looking around for all of the parent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) * XFRM dsts that reference it in order to invalidate them. It
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) * is just too much work. Instead we make the checks here on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) * every use. For example:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) * XFRM dst A --> IPv4 dst X
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) * X is the "xdst->route" of A (X is also the "dst->path" of A
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) * in this example). If X is marked obsolete, "A" will not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) * notice. That's what we are validating here via the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) * stale_bundle() check.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) * When a dst is removed from the fib tree, DST_OBSOLETE_DEAD will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) * be marked on it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) * This will force stale_bundle() to fail on any xdst bundle with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) * this dst linked in it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) if (dst->obsolete < 0 && !stale_bundle(dst))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) return dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) static int stale_bundle(struct dst_entry *dst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) return !xfrm_bundle_ok((struct xfrm_dst *)dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773)
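/* A device used by a bundle is going away: re-point every child xfrm dst
 * that still references it at the namespace loopback device, transferring
 * the device reference accordingly.
 */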
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) while ((dst = xfrm_dst_child(dst)) && dst->xfrm && dst->dev == dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) dst->dev = dev_net(dev)->loopback_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) dev_hold(dst->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) dev_put(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) EXPORT_SYMBOL(xfrm_dst_ifdown);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) static void xfrm_link_failure(struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) /* Impossible. Such a dst must be popped before it reaches the point of failure. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) if (dst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) if (dst->obsolete) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) dst_release(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) dst = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) return dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) while (nr--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) struct xfrm_dst *xdst = bundle[nr];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) u32 pmtu, route_mtu_cached;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) struct dst_entry *dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) dst = &xdst->u.dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) pmtu = dst_mtu(xfrm_dst_child(dst));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) xdst->child_mtu_cached = pmtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) route_mtu_cached = dst_mtu(xdst->route);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) xdst->route_mtu_cached = route_mtu_cached;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) if (pmtu > route_mtu_cached)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) pmtu = route_mtu_cached;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) dst_metric_set(dst, RTAX_MTU, pmtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) /* Check that the bundle accepts the flow and that its components are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) * still valid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) static int xfrm_bundle_ok(struct xfrm_dst *first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) struct dst_entry *dst = &first->u.dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) struct xfrm_dst *xdst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) int start_from, nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) u32 mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) if (!dst_check(xfrm_dst_path(dst), ((struct xfrm_dst *)dst)->path_cookie) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) (dst->dev && !netif_running(dst->dev)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) if (dst->flags & DST_XFRM_QUEUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) start_from = nr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) if (dst->xfrm->km.state != XFRM_STATE_VALID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) if (xdst->xfrm_genid != dst->xfrm->genid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) if (xdst->num_pols > 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) bundle[nr++] = xdst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) mtu = dst_mtu(xfrm_dst_child(dst));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) if (xdst->child_mtu_cached != mtu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) start_from = nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) xdst->child_mtu_cached = mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) if (!dst_check(xdst->route, xdst->route_cookie))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) mtu = dst_mtu(xdst->route);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) if (xdst->route_mtu_cached != mtu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) start_from = nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) xdst->route_mtu_cached = mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) dst = xfrm_dst_child(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) } while (dst->xfrm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872)
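/* If no cached MTU changed we are done; otherwise walk back from the
 * deepest affected entry toward the top of the bundle, clamping each
 * level's MTU by its cached route MTU and recording the result as the
 * parent's cached child MTU.
 */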
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) if (likely(!start_from))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) xdst = bundle[start_from - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) mtu = xdst->child_mtu_cached;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) while (start_from--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) dst = &xdst->u.dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) mtu = xfrm_state_mtu(dst->xfrm, mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) if (mtu > xdst->route_mtu_cached)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) mtu = xdst->route_mtu_cached;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) dst_metric_set(dst, RTAX_MTU, mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) if (!start_from)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) xdst = bundle[start_from - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) xdst->child_mtu_cached = mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) return dst_metric_advmss(xfrm_dst_path(dst));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) static unsigned int xfrm_mtu(const struct dst_entry *dst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) return mtu ? : dst_mtu(xfrm_dst_path(dst));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906)
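/* Pick the address used for neighbour resolution on a bundle: skip
 * transport-mode states and, for tunnel-mode states, substitute the outer
 * destination (xfrm->id.daddr), or the state's care-of address when its
 * type sets XFRM_TYPE_REMOTE_COADDR.
 */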
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) const void *daddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) while (dst->xfrm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) const struct xfrm_state *xfrm = dst->xfrm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) dst = xfrm_dst_child(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) daddr = xfrm->coaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) daddr = &xfrm->id.daddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) return daddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) const void *daddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) const struct dst_entry *path = xfrm_dst_path(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) daddr = xfrm_get_dst_nexthop(dst, daddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) return path->ops->neigh_lookup(path, skb, daddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) const struct dst_entry *path = xfrm_dst_path(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) daddr = xfrm_get_dst_nexthop(dst, daddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) path->ops->confirm_neigh(path, daddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) if (WARN_ON(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) return -EAFNOSUPPORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950)
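/* Publish the afinfo and fill in generic xfrm defaults for any dst_ops
 * hooks the address family did not provide itself.
 */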
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) spin_lock(&xfrm_policy_afinfo_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) if (unlikely(xfrm_policy_afinfo[family] != NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) err = -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) struct dst_ops *dst_ops = afinfo->dst_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) if (likely(dst_ops->kmem_cachep == NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) dst_ops->kmem_cachep = xfrm_dst_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) if (likely(dst_ops->check == NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) dst_ops->check = xfrm_dst_check;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) if (likely(dst_ops->default_advmss == NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) dst_ops->default_advmss = xfrm_default_advmss;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) if (likely(dst_ops->mtu == NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) dst_ops->mtu = xfrm_mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) if (likely(dst_ops->negative_advice == NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) dst_ops->negative_advice = xfrm_negative_advice;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) if (likely(dst_ops->link_failure == NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) dst_ops->link_failure = xfrm_link_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) if (likely(dst_ops->neigh_lookup == NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) dst_ops->neigh_lookup = xfrm_neigh_lookup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) if (likely(!dst_ops->confirm_neigh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) dst_ops->confirm_neigh = xfrm_confirm_neigh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) rcu_assign_pointer(xfrm_policy_afinfo[family], afinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) spin_unlock(&xfrm_policy_afinfo_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) EXPORT_SYMBOL(xfrm_policy_register_afinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) struct dst_ops *dst_ops = afinfo->dst_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) for (i = 0; i < ARRAY_SIZE(xfrm_policy_afinfo); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) if (xfrm_policy_afinfo[i] != afinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) RCU_INIT_POINTER(xfrm_policy_afinfo[i], NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) synchronize_rcu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) dst_ops->kmem_cachep = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) dst_ops->check = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) dst_ops->negative_advice = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) dst_ops->link_failure = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) spin_lock(&xfrm_if_cb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) rcu_assign_pointer(xfrm_if_cb, ifcb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) spin_unlock(&xfrm_if_cb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) EXPORT_SYMBOL(xfrm_if_register_cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) void xfrm_if_unregister_cb(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) RCU_INIT_POINTER(xfrm_if_cb, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) synchronize_rcu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) EXPORT_SYMBOL(xfrm_if_unregister_cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) #ifdef CONFIG_XFRM_STATISTICS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) static int __net_init xfrm_statistics_init(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) int rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) if (!net->mib.xfrm_statistics)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) rv = xfrm_proc_init(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) if (rv < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) free_percpu(net->mib.xfrm_statistics);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) static void xfrm_statistics_fini(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) xfrm_proc_fini(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) free_percpu(net->mib.xfrm_statistics);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) static int __net_init xfrm_statistics_init(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) static void xfrm_statistics_fini(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) static int __net_init xfrm_policy_init(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) unsigned int hmask, sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) int dir, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) if (net_eq(net, &init_net)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) sizeof(struct xfrm_dst),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) err = rhashtable_init(&xfrm_policy_inexact_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) &xfrm_pol_inexact_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) BUG_ON(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059)
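/* Start with small (8 bucket) hash tables; xfrm_hash_resize(), scheduled
 * via policy_hash_work, grows them later as policies are inserted.
 */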
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) hmask = 8 - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) sz = (hmask+1) * sizeof(struct hlist_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) if (!net->xfrm.policy_byidx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) goto out_byidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) net->xfrm.policy_idx_hmask = hmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) struct xfrm_policy_hash *htab;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) net->xfrm.policy_count[dir] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) htab = &net->xfrm.policy_bydst[dir];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) htab->table = xfrm_hash_alloc(sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) if (!htab->table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) goto out_bydst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) htab->hmask = hmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) htab->dbits4 = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) htab->sbits4 = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) htab->dbits6 = 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) htab->sbits6 = 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) net->xfrm.policy_hthresh.lbits4 = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) net->xfrm.policy_hthresh.rbits4 = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) net->xfrm.policy_hthresh.lbits6 = 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) net->xfrm.policy_hthresh.rbits6 = 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) seqlock_init(&net->xfrm.policy_hthresh.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) INIT_LIST_HEAD(&net->xfrm.policy_all);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) INIT_LIST_HEAD(&net->xfrm.inexact_bins);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) out_bydst:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) for (dir--; dir >= 0; dir--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) struct xfrm_policy_hash *htab;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) htab = &net->xfrm.policy_bydst[dir];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) xfrm_hash_free(htab->table, sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) xfrm_hash_free(net->xfrm.policy_byidx, sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) out_byidx:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) static void xfrm_policy_fini(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) struct xfrm_pol_inexact_bin *b, *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) unsigned int sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) int dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) flush_work(&net->xfrm.policy_hash_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) #ifdef CONFIG_XFRM_SUB_POLICY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) WARN_ON(!list_empty(&net->xfrm.policy_all));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) struct xfrm_policy_hash *htab;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) htab = &net->xfrm.policy_bydst[dir];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) sz = (htab->hmask + 1) * sizeof(struct hlist_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) WARN_ON(!hlist_empty(htab->table));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) xfrm_hash_free(htab->table, sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) xfrm_hash_free(net->xfrm.policy_byidx, sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) spin_lock_bh(&net->xfrm.xfrm_policy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) list_for_each_entry_safe(b, t, &net->xfrm.inexact_bins, inexact_bins)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) __xfrm_policy_inexact_prune_bin(b, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) static int __net_init xfrm_net_init(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) int rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) /* Initialize the per-net locks here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) spin_lock_init(&net->xfrm.xfrm_state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) spin_lock_init(&net->xfrm.xfrm_policy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) mutex_init(&net->xfrm.xfrm_cfg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) rv = xfrm_statistics_init(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) if (rv < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) goto out_statistics;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) rv = xfrm_state_init(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) if (rv < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) goto out_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) rv = xfrm_policy_init(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) if (rv < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) goto out_policy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) rv = xfrm_sysctl_init(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) if (rv < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) goto out_sysctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) out_sysctl:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) xfrm_policy_fini(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) out_policy:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) xfrm_state_fini(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) out_state:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) xfrm_statistics_fini(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) out_statistics:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) static void __net_exit xfrm_net_exit(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) xfrm_sysctl_fini(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) xfrm_policy_fini(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) xfrm_state_fini(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) xfrm_statistics_fini(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) static struct pernet_operations __net_initdata xfrm_net_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) .init = xfrm_net_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) .exit = xfrm_net_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) };
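
/*
 * A minimal sketch of the pernet registration pattern used by
 * xfrm_net_ops above, written for a hypothetical "example" subsystem.
 * Only struct pernet_operations and register_pernet_subsys()/
 * unregister_pernet_subsys() from <net/net_namespace.h> are assumed;
 * all example_* names are illustrative.
 */
#if 0	/* illustrative sketch, not part of the build */
static int __net_init example_net_init(struct net *net)
{
	/* set up per-namespace state; runs for every existing and
	 * future network namespace once the ops are registered
	 */
	return 0;
}

static void __net_exit example_net_exit(struct net *net)
{
	/* tear down per-namespace state */
}

static struct pernet_operations example_net_ops = {
	.init = example_net_init,
	.exit = example_net_exit,
};

static int __init example_subsys_init(void)
{
	return register_pernet_subsys(&example_net_ops);
}

static void example_subsys_exit(void)
{
	unregister_pernet_subsys(&example_net_ops);
}
#endif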
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) void __init xfrm_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) register_pernet_subsys(&xfrm_net_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) xfrm_dev_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) seqcount_mutex_init(&xfrm_policy_hash_generation, &hash_resize_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) xfrm_input_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) #ifdef CONFIG_XFRM_ESPINTCP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) espintcp_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) RCU_INIT_POINTER(xfrm_if_cb, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) synchronize_rcu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) #ifdef CONFIG_AUDITSYSCALL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) struct audit_buffer *audit_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) struct xfrm_sec_ctx *ctx = xp->security;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) struct xfrm_selector *sel = &xp->selector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) if (ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) switch (sel->family) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) case AF_INET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) if (sel->prefixlen_s != 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) audit_log_format(audit_buf, " src_prefixlen=%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) sel->prefixlen_s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) if (sel->prefixlen_d != 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) audit_log_format(audit_buf, " dst_prefixlen=%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) sel->prefixlen_d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) case AF_INET6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) if (sel->prefixlen_s != 128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) audit_log_format(audit_buf, " src_prefixlen=%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) sel->prefixlen_s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) if (sel->prefixlen_d != 128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) audit_log_format(audit_buf, " dst_prefixlen=%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) sel->prefixlen_d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) struct audit_buffer *audit_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) audit_buf = xfrm_audit_start("SPD-add");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) if (audit_buf == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) xfrm_audit_helper_usrinfo(task_valid, audit_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) audit_log_format(audit_buf, " res=%u", result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) xfrm_audit_common_policyinfo(xp, audit_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) audit_log_end(audit_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) bool task_valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) struct audit_buffer *audit_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) audit_buf = xfrm_audit_start("SPD-delete");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) if (audit_buf == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) xfrm_audit_helper_usrinfo(task_valid, audit_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) audit_log_format(audit_buf, " res=%u", result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) xfrm_audit_common_policyinfo(xp, audit_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) audit_log_end(audit_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) #endif
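
/*
 * A minimal sketch of how a caller is expected to use the audit hooks
 * above: record the outcome of an SPD change with result=1 on success
 * and result=0 on failure, and task_valid=true when running in task
 * (user request) context.  The example_* wrapper is hypothetical;
 * xfrm_policy_insert() is the insertion helper defined earlier in
 * this file.
 */
#if 0	/* illustrative sketch, not part of the build */
static int example_add_policy(struct xfrm_policy *xp, int dir, int excl)
{
	int err;

	err = xfrm_policy_insert(dir, xp, excl);
	xfrm_audit_policy_add(xp, err ? 0 : 1, true);
	return err;
}
#endif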
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) #ifdef CONFIG_XFRM_MIGRATE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) const struct xfrm_selector *sel_tgt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) if (sel_tgt->family == sel_cmp->family &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) sel_cmp->family) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) sel_cmp->family) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) }
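
/*
 * A minimal sketch of the two matching modes implemented above: with
 * proto == IPSEC_ULPROTO_ANY only the family, addresses and prefix
 * lengths are compared, otherwise the whole selector must match byte
 * for byte.  The selector values are illustrative only.
 */
#if 0	/* illustrative sketch, not part of the build */
static void example_selector_match(void)
{
	struct xfrm_selector cmp = {
		.family       = AF_INET,
		.proto        = IPSEC_ULPROTO_ANY,
		.prefixlen_d  = 24,
		.prefixlen_s  = 24,
		.daddr.a4     = htonl(0xc0000200),	/* 192.0.2.0/24 */
		.saddr.a4     = htonl(0xc6336400),	/* 198.51.100.0/24 */
	};
	struct xfrm_selector tgt = cmp;

	/* differing ports are ignored when proto is the wildcard */
	tgt.dport = htons(4500);
	WARN_ON(!xfrm_migrate_selector_match(&cmp, &tgt));

	/* with an explicit proto the comparison is exact */
	cmp.proto = IPPROTO_UDP;
	WARN_ON(xfrm_migrate_selector_match(&cmp, &tgt));
}
#endif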
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) u8 dir, u8 type, struct net *net, u32 if_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) struct xfrm_policy *pol, *ret = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) struct hlist_head *chain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) u32 priority = ~0U;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) spin_lock_bh(&net->xfrm.xfrm_policy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) hlist_for_each_entry(pol, chain, bydst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) if ((if_id == 0 || pol->if_id == if_id) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) xfrm_migrate_selector_match(sel, &pol->selector) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) pol->type == type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) ret = pol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) priority = ret->priority;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) chain = &net->xfrm.policy_inexact[dir];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) hlist_for_each_entry(pol, chain, bydst_inexact_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) if ((pol->priority >= priority) && ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317) if ((if_id == 0 || pol->if_id == if_id) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) xfrm_migrate_selector_match(sel, &pol->selector) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) pol->type == type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) ret = pol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) xfrm_pol_hold(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331)
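/* Return 1 if template @t matches migrate entry @m.  Mode and proto
 * must be equal and a zero reqid in @m acts as a wildcard; tunnel and
 * BEET templates must additionally carry @m's old endpoint addresses,
 * transport templates match on mode and proto alone, and any other
 * mode never matches.
 */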
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) int match = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) if (t->mode == m->mode && t->id.proto == m->proto &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) (m->reqid == 0 || t->reqid == m->reqid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) switch (t->mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) case XFRM_MODE_TUNNEL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) case XFRM_MODE_BEET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) m->old_family) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) xfrm_addr_equal(&t->saddr, &m->old_saddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) m->old_family)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) match = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) case XFRM_MODE_TRANSPORT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) 			/* in case of transport mode, the template does not store
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350) 			 * any IP addresses, hence we only compare mode and protocol
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) match = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) return match;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) /* update endpoint address(es) of template(s) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) static int xfrm_policy_migrate(struct xfrm_policy *pol,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363) struct xfrm_migrate *m, int num_migrate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365) struct xfrm_migrate *mp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) int i, j, n = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) write_lock_bh(&pol->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369) if (unlikely(pol->walk.dead)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370) /* target policy has been deleted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) write_unlock_bh(&pol->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375) for (i = 0; i < pol->xfrm_nr; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376) for (j = 0, mp = m; j < num_migrate; j++, mp++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377) if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379) n++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381) pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383) /* update endpoints */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) sizeof(pol->xfrm_vec[i].id.daddr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387) sizeof(pol->xfrm_vec[i].saddr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) pol->xfrm_vec[i].encap_family = mp->new_family;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) /* flush bundles */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) atomic_inc(&pol->genid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) write_unlock_bh(&pol->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) if (!n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397) return -ENODATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406) if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409) for (i = 0; i < num_migrate; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410) if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) 		/* check if there are any duplicate entries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) for (j = i + 1; j < num_migrate; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417) sizeof(m[i].old_daddr)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) !memcmp(&m[i].old_saddr, &m[j].old_saddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) sizeof(m[i].old_saddr)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) m[i].proto == m[j].proto &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) m[i].mode == m[j].mode &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) m[i].reqid == m[j].reqid &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) m[i].old_family == m[j].old_family)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429) }
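
/*
 * A minimal sketch of inputs rejected by xfrm_migrate_check() above:
 * new addresses may not be unspecified, and no two entries may repeat
 * the same (old addresses, proto, mode, reqid, old_family) key.  The
 * addresses and the example_* name are illustrative only.
 */
#if 0	/* illustrative sketch, not part of the build */
static void example_migrate_check(void)
{
	struct xfrm_migrate m[2] = {
		{
			.old_family   = AF_INET,
			.new_family   = AF_INET,
			.proto        = IPPROTO_ESP,
			.mode         = XFRM_MODE_TUNNEL,
			.old_daddr.a4 = htonl(0xc0000201),	/* 192.0.2.1 */
			.old_saddr.a4 = htonl(0xc0000202),	/* 192.0.2.2 */
			.new_daddr.a4 = htonl(0xc6336401),	/* 198.51.100.1 */
			.new_saddr.a4 = htonl(0xc6336402),	/* 198.51.100.2 */
		},
	};

	/* a single well-formed entry passes */
	WARN_ON(xfrm_migrate_check(m, 1) != 0);

	/* a duplicate of the first entry is rejected */
	m[1] = m[0];
	WARN_ON(xfrm_migrate_check(m, 2) != -EINVAL);

	/* an unspecified new address is rejected as well */
	m[0].new_daddr.a4 = 0;
	WARN_ON(xfrm_migrate_check(m, 1) != -EINVAL);
}
#endif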
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431) int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) struct xfrm_migrate *m, int num_migrate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) struct xfrm_kmaddress *k, struct net *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) struct xfrm_encap_tmpl *encap, u32 if_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) int i, err, nx_cur = 0, nx_new = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) struct xfrm_policy *pol = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438) struct xfrm_state *x, *xc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) struct xfrm_state *x_new[XFRM_MAX_DEPTH];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441) struct xfrm_migrate *mp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) /* Stage 0 - sanity checks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) if (dir >= XFRM_POLICY_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) /* Stage 1 - find policy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453) if ((pol = xfrm_migrate_policy_find(sel, dir, type, net, if_id)) == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) err = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458) /* Stage 2 - find and update state(s) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459) for (i = 0, mp = m; i < num_migrate; i++, mp++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460) if ((x = xfrm_migrate_state_find(mp, net, if_id))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461) x_cur[nx_cur] = x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462) nx_cur++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463) xc = xfrm_state_migrate(x, mp, encap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464) if (xc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) x_new[nx_new] = xc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466) nx_new++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468) err = -ENODATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469) goto restore_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474) /* Stage 3 - update policy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476) goto restore_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478) /* Stage 4 - delete old state(s) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479) if (nx_cur) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480) xfrm_states_put(x_cur, nx_cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481) xfrm_states_delete(x_cur, nx_cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) /* Stage 5 - announce */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485) km_migrate(sel, dir, type, m, num_migrate, k, encap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487) xfrm_pol_put(pol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493) restore_state:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494) if (pol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495) xfrm_pol_put(pol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496) if (nx_cur)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497) xfrm_states_put(x_cur, nx_cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498) if (nx_new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4499) xfrm_states_delete(x_new, nx_new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4503) EXPORT_SYMBOL(xfrm_migrate);
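
/*
 * A minimal sketch of how a key manager might drive xfrm_migrate()
 * above: describe each old/new endpoint pair in a struct xfrm_migrate,
 * pick the policy by selector, direction and type, and let stages 1-5
 * update the matching policy and states.  The addresses and the
 * example_* name are hypothetical; a zero reqid matches any reqid and
 * a zero if_id matches any interface id.
 */
#if 0	/* illustrative sketch, not part of the build */
static int example_migrate_tunnel(struct net *net,
				  const struct xfrm_selector *sel)
{
	struct xfrm_migrate m = {
		.old_family   = AF_INET,
		.new_family   = AF_INET,
		.proto        = IPPROTO_ESP,
		.mode         = XFRM_MODE_TUNNEL,
		.reqid        = 0,
		.old_daddr.a4 = htonl(0xc0000201),	/* 192.0.2.1 */
		.old_saddr.a4 = htonl(0xc0000202),	/* 192.0.2.2 */
		.new_daddr.a4 = htonl(0xc6336401),	/* 198.51.100.1 */
		.new_saddr.a4 = htonl(0xc6336402),	/* 198.51.100.2 */
	};

	/* no kmaddress and no new encapsulation template */
	return xfrm_migrate(sel, XFRM_POLICY_OUT, XFRM_POLICY_TYPE_MAIN,
			    &m, 1, NULL, net, NULL, 0);
}
#endif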
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4504) #endif