^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) #define pr_fmt(fmt) "IPsec: " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) #include <crypto/aead.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) #include <crypto/authenc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include <linux/err.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <net/ip.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <net/xfrm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <net/esp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/scatterlist.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/pfkeyv2.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/rtnetlink.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/in6.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <net/icmp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <net/protocol.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <net/udp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <net/tcp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <net/espintcp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/highmem.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) struct esp_skb_cb {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) struct xfrm_skb_cb xfrm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) void *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) struct esp_output_extra {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) __be32 seqhi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) u32 esphoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) * Allocate an AEAD request structure with extra space for SG and IV.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) * For alignment considerations the IV is placed at the front, followed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) * by the request and finally the SG list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) * TODO: Use spare space in skb for this where possible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int extralen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) unsigned int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) len = extralen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) len += crypto_aead_ivsize(aead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) if (len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) len += crypto_aead_alignmask(aead) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) ~(crypto_tfm_ctx_alignment() - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) len = ALIGN(len, crypto_tfm_ctx_alignment());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) len = ALIGN(len, __alignof__(struct scatterlist));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) len += sizeof(struct scatterlist) * nfrags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) return kmalloc(len, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) static inline void *esp_tmp_extra(void *tmp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int extralen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) return crypto_aead_ivsize(aead) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) PTR_ALIGN((u8 *)tmp + extralen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) crypto_aead_alignmask(aead) + 1) : tmp + extralen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) struct aead_request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) crypto_tfm_ctx_alignment());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) aead_request_set_tfm(req, aead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) return req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) struct aead_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) return (void *)ALIGN((unsigned long)(req + 1) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) crypto_aead_reqsize(aead),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) __alignof__(struct scatterlist));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) struct esp_output_extra *extra = esp_tmp_extra(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) struct crypto_aead *aead = x->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) int extralen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) u8 *iv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) struct aead_request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) if (x->props.flags & XFRM_STATE_ESN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) extralen += sizeof(*extra);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) extra = esp_tmp_extra(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) iv = esp_tmp_iv(aead, tmp, extralen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) req = esp_tmp_req(aead, iv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) /* Unref skb_frag_pages in the src scatterlist if necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) * Skip the first sg which comes from skb->data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) if (req->src != req->dst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) for (sg = sg_next(req->src); sg; sg = sg_next(sg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) put_page(sg_page(sg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) #ifdef CONFIG_INET_ESPINTCP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) struct esp_tcp_sk {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) struct sock *sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) struct rcu_head rcu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) static void esp_free_tcp_sk(struct rcu_head *head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) struct esp_tcp_sk *esk = container_of(head, struct esp_tcp_sk, rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) sock_put(esk->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) kfree(esk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) static struct sock *esp_find_tcp_sk(struct xfrm_state *x)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) struct xfrm_encap_tmpl *encap = x->encap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) struct esp_tcp_sk *esk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) __be16 sport, dport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) struct sock *nsk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) struct sock *sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) sk = rcu_dereference(x->encap_sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) if (sk && sk->sk_state == TCP_ESTABLISHED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) return sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) spin_lock_bh(&x->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) sport = encap->encap_sport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) dport = encap->encap_dport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) nsk = rcu_dereference_protected(x->encap_sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) lockdep_is_held(&x->lock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) if (sk && sk == nsk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) esk = kmalloc(sizeof(*esk), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) if (!esk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) spin_unlock_bh(&x->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) RCU_INIT_POINTER(x->encap_sk, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) esk->sk = sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) call_rcu(&esk->rcu, esp_free_tcp_sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) spin_unlock_bh(&x->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) sk = inet_lookup_established(xs_net(x), &tcp_hashinfo, x->id.daddr.a4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) dport, x->props.saddr.a4, sport, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) if (!sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) return ERR_PTR(-ENOENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) if (!tcp_is_ulp_esp(sk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) sock_put(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) spin_lock_bh(&x->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) nsk = rcu_dereference_protected(x->encap_sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) lockdep_is_held(&x->lock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) if (encap->encap_sport != sport ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) encap->encap_dport != dport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) sock_put(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) sk = nsk ?: ERR_PTR(-EREMCHG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) } else if (sk == nsk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) sock_put(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) rcu_assign_pointer(x->encap_sk, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) spin_unlock_bh(&x->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) return sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) struct sock *sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) sk = esp_find_tcp_sk(x);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) err = PTR_ERR_OR_ZERO(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) bh_lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) if (sock_owned_by_user(sk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) err = espintcp_queue_out(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) err = espintcp_push_skb(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) bh_unlock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) static int esp_output_tcp_encap_cb(struct net *net, struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) struct dst_entry *dst = skb_dst(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) struct xfrm_state *x = dst->xfrm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) return esp_output_tcp_finish(x, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) local_bh_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) err = xfrm_trans_queue_net(xs_net(x), skb, esp_output_tcp_encap_cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) local_bh_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) /* EINPROGRESS just happens to do the right thing. It
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) * actually means that the skb has been consumed and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) * isn't coming back.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) return err ?: -EINPROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) static void esp_output_done(struct crypto_async_request *base, int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) struct sk_buff *skb = base->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) struct xfrm_offload *xo = xfrm_offload(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) void *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) struct xfrm_state *x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) if (xo && (xo->flags & XFRM_DEV_RESUME)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) struct sec_path *sp = skb_sec_path(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) x = sp->xvec[sp->len - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) x = skb_dst(skb)->xfrm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) tmp = ESP_SKB_CB(skb)->tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) esp_ssg_unref(x, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) kfree(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) if (xo && (xo->flags & XFRM_DEV_RESUME)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) skb_push(skb, skb->data - skb_mac_header(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) secpath_reset(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) xfrm_dev_resume(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) if (!err &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) esp_output_tail_tcp(x, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) xfrm_output_resume(skb, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) /* Move ESP header back into place. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) struct ip_esp_hdr *esph = (void *)(skb->data + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) void *tmp = ESP_SKB_CB(skb)->tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) __be32 *seqhi = esp_tmp_extra(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) esph->seq_no = esph->spi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) esph->spi = *seqhi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) static void esp_output_restore_header(struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) void *tmp = ESP_SKB_CB(skb)->tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) struct esp_output_extra *extra = esp_tmp_extra(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) sizeof(__be32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) static struct ip_esp_hdr *esp_output_set_extra(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) struct xfrm_state *x,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) struct ip_esp_hdr *esph,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) struct esp_output_extra *extra)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) /* For ESN we move the header forward by 4 bytes to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) * accomodate the high bits. We will move it back after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) * encryption.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) if ((x->props.flags & XFRM_STATE_ESN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) __u32 seqhi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) struct xfrm_offload *xo = xfrm_offload(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) if (xo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) seqhi = xo->seq.hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) seqhi = XFRM_SKB_CB(skb)->seq.output.hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) extra->esphoff = (unsigned char *)esph -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) skb_transport_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) extra->seqhi = esph->spi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) esph->seq_no = htonl(seqhi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) esph->spi = x->id.spi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) return esph;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) static void esp_output_done_esn(struct crypto_async_request *base, int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) struct sk_buff *skb = base->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) esp_output_restore_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) esp_output_done(base, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) static struct ip_esp_hdr *esp_output_udp_encap(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) int encap_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) struct esp_info *esp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) __be16 sport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) __be16 dport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) struct udphdr *uh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) __be32 *udpdata32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) unsigned int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) len = skb->len + esp->tailen - skb_transport_offset(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) if (len + sizeof(struct iphdr) > IP_MAX_MTU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) return ERR_PTR(-EMSGSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) uh = (struct udphdr *)esp->esph;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) uh->source = sport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) uh->dest = dport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) uh->len = htons(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) uh->check = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) *skb_mac_header(skb) = IPPROTO_UDP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) if (encap_type == UDP_ENCAP_ESPINUDP_NON_IKE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) udpdata32 = (__be32 *)(uh + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) udpdata32[0] = udpdata32[1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) return (struct ip_esp_hdr *)(udpdata32 + 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) return (struct ip_esp_hdr *)(uh + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) #ifdef CONFIG_INET_ESPINTCP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) static struct ip_esp_hdr *esp_output_tcp_encap(struct xfrm_state *x,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) struct esp_info *esp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) __be16 *lenp = (void *)esp->esph;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) struct ip_esp_hdr *esph;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) unsigned int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) struct sock *sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) len = skb->len + esp->tailen - skb_transport_offset(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) if (len > IP_MAX_MTU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) return ERR_PTR(-EMSGSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) sk = esp_find_tcp_sk(x);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) if (IS_ERR(sk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) return ERR_CAST(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) *lenp = htons(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) esph = (struct ip_esp_hdr *)(lenp + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) return esph;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) static struct ip_esp_hdr *esp_output_tcp_encap(struct xfrm_state *x,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) struct esp_info *esp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) return ERR_PTR(-EOPNOTSUPP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) static int esp_output_encap(struct xfrm_state *x, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) struct esp_info *esp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) struct xfrm_encap_tmpl *encap = x->encap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) struct ip_esp_hdr *esph;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) __be16 sport, dport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) int encap_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) spin_lock_bh(&x->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) sport = encap->encap_sport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) dport = encap->encap_dport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) encap_type = encap->encap_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) spin_unlock_bh(&x->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) switch (encap_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) case UDP_ENCAP_ESPINUDP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) case UDP_ENCAP_ESPINUDP_NON_IKE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) esph = esp_output_udp_encap(skb, encap_type, esp, sport, dport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) case TCP_ENCAP_ESPINTCP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) esph = esp_output_tcp_encap(x, skb, esp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) if (IS_ERR(esph))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) return PTR_ERR(esph);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) esp->esph = esph;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) u8 *tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) int nfrags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) int esph_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) struct sk_buff *trailer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) int tailen = esp->tailen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) unsigned int allocsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) /* this is non-NULL only with TCP/UDP Encapsulation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) if (x->encap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) int err = esp_output_encap(x, skb, esp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) allocsz = ALIGN(skb->data_len + tailen, L1_CACHE_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) if (allocsz > ESP_SKB_FRAG_MAXSIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) goto cow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) if (!skb_cloned(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) if (tailen <= skb_tailroom(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) nfrags = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) trailer = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) tail = skb_tail_pointer(trailer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) goto skip_cow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) } else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) && !skb_has_frag_list(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) int allocsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) struct sock *sk = skb->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) struct page_frag *pfrag = &x->xfrag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) esp->inplace = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) allocsize = ALIGN(tailen, L1_CACHE_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) spin_lock_bh(&x->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) spin_unlock_bh(&x->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) goto cow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) page = pfrag->page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) get_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) tail = page_address(page) + pfrag->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) nfrags = skb_shinfo(skb)->nr_frags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) __skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) tailen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) skb_shinfo(skb)->nr_frags = ++nfrags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) pfrag->offset = pfrag->offset + allocsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) spin_unlock_bh(&x->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) nfrags++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) skb->len += tailen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) skb->data_len += tailen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) skb->truesize += tailen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) if (sk && sk_fullsock(sk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) refcount_add(tailen, &sk->sk_wmem_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) cow:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) esph_offset = (unsigned char *)esp->esph - skb_transport_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) nfrags = skb_cow_data(skb, tailen, &trailer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) if (nfrags < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) tail = skb_tail_pointer(trailer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) esp->esph = (struct ip_esp_hdr *)(skb_transport_header(skb) + esph_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) skip_cow:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) pskb_put(skb, trailer, tailen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) return nfrags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) EXPORT_SYMBOL_GPL(esp_output_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535)
/* Encrypt an ESP packet whose trailer was already attached by
 * esp_output_head(): map the skb into scatterlists, set up the AEAD
 * request (IV, AAD, completion callback) and run crypto_aead_encrypt().
 *
 * Returns 0 on success or a negative error code.  On -EINPROGRESS the
 * async crypto callback finishes the packet later; in that case the
 * scratch buffer must stay alive, which is why that path jumps to
 * "error" and skips the kfree().
 */
int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *iv;
	int alen;
	void *tmp;
	int ivlen;
	int assoclen;
	int extralen;
	struct page *page;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct aead_request *req;
	struct scatterlist *sg, *dsg;
	struct esp_output_extra *extra;
	int err = -ENOMEM;

	assoclen = sizeof(struct ip_esp_hdr);
	extralen = 0;

	/* With ESN the associated data also covers the high sequence
	 * bits, and extra scratch space is needed for the header shuffle
	 * done by esp_output_set_extra().
	 */
	if (x->props.flags & XFRM_STATE_ESN) {
		extralen += sizeof(*extra);
		assoclen += sizeof(__be32);
	}

	aead = x->data;
	alen = crypto_aead_authsize(aead);
	ivlen = crypto_aead_ivsize(aead);

	/* One contiguous scratch buffer carries the extra data, IV, AEAD
	 * request and scatterlist tables (layout per esp_tmp_* helpers).
	 */
	tmp = esp_alloc_tmp(aead, esp->nfrags + 2, extralen);
	if (!tmp)
		goto error;

	extra = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	/* Encrypt in place unless esp_output_head() placed the trailer
	 * in a separately allocated page frag (esp->inplace == false).
	 */
	if (esp->inplace)
		dsg = sg;
	else
		dsg = &sg[esp->nfrags];

	esph = esp_output_set_extra(skb, x, esp->esph, extra);
	esp->esph = esph;

	sg_init_table(sg, esp->nfrags);
	err = skb_to_sgvec(skb, sg,
		           (unsigned char *)esph - skb->data,
		           assoclen + ivlen + esp->clen + alen);
	if (unlikely(err < 0))
		goto error_free;

	if (!esp->inplace) {
		int allocsize;
		struct page_frag *pfrag = &x->xfrag;

		allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);

		/* x->lock serializes use of the per-state page frag. */
		spin_lock_bh(&x->lock);
		if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
			spin_unlock_bh(&x->lock);
			goto error_free;
		}

		skb_shinfo(skb)->nr_frags = 1;

		page = pfrag->page;
		get_page(page);
		/* replace page frags in skb with new page */
		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
		pfrag->offset = pfrag->offset + allocsize;
		spin_unlock_bh(&x->lock);

		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
		err = skb_to_sgvec(skb, dsg,
			           (unsigned char *)esph - skb->data,
			           assoclen + ivlen + esp->clen + alen);
		if (unlikely(err < 0))
			goto error_free;
	}

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_output_done, skb);

	aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
	aead_request_set_ad(req, assoclen);

	/* Copy (up to) the low 8 bytes of the 64-bit sequence number
	 * into the tail of the IV buffer, zero-padding the rest.
	 */
	memset(iv, 0, ivlen);
	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
	       min(ivlen, 8));

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_encrypt(req);

	switch (err) {
	case -EINPROGRESS:
		/* async: the callback owns tmp now, do not free it */
		goto error;

	case -ENOSPC:
		err = NET_XMIT_DROP;
		break;

	case 0:
		if ((x->props.flags & XFRM_STATE_ESN))
			esp_output_restore_header(skb);
	}

	if (sg != dsg)
		esp_ssg_unref(x, tmp);

	if (!err && x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
		err = esp_output_tail_tcp(x, skb);

error_free:
	kfree(tmp);
error:
	return err;
}
EXPORT_SYMBOL_GPL(esp_output_tail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) int alen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) int blksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) struct ip_esp_hdr *esph;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) struct crypto_aead *aead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) struct esp_info esp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) esp.inplace = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) esp.proto = *skb_mac_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) *skb_mac_header(skb) = IPPROTO_ESP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) /* skb is pure payload to encrypt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) aead = x->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) alen = crypto_aead_authsize(aead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) esp.tfclen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) if (x->tfcpad) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) u32 padto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) if (skb->len < padto)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) esp.tfclen = padto - skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) blksize = ALIGN(crypto_aead_blocksize(aead), 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) esp.plen = esp.clen - skb->len - esp.tfclen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) esp.tailen = esp.tfclen + esp.plen + alen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) esp.esph = ip_esp_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) esp.nfrags = esp_output_head(x, skb, &esp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) if (esp.nfrags < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) return esp.nfrags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) esph = esp.esph;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) esph->spi = x->id.spi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) skb_push(skb, -skb_network_offset(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) return esp_output_tail(x, skb, &esp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) static inline int esp_remove_trailer(struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) struct xfrm_state *x = xfrm_input_state(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) struct xfrm_offload *xo = xfrm_offload(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) struct crypto_aead *aead = x->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) int alen, hlen, elen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) int padlen, trimlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) __wsum csumdiff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) u8 nexthdr[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) alen = crypto_aead_authsize(aead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) elen = skb->len - hlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) if (xo && (xo->flags & XFRM_ESP_NO_TRAILER)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) ret = xo->proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) padlen = nexthdr[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) if (padlen + 2 + alen >= elen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) padlen + 2, elen - alen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) trimlen = alen + padlen + 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) if (skb->ip_summed == CHECKSUM_COMPLETE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) skb->csum = csum_block_sub(skb->csum, csumdiff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) skb->len - trimlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) pskb_trim(skb, skb->len - trimlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) ret = nexthdr[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752)
/* Post-decryption processing for ESP input: free the crypto scratch
 * buffer, strip the trailer, handle NAT-T peer address/port changes and
 * pull the ESP header so the skb points at the inner payload.
 *
 * Returns the inner protocol number on success or a negative error.
 */
int esp_input_done2(struct sk_buff *skb, int err)
{
	const struct iphdr *iph;
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int ihl;

	/* tmp was allocated by the software input path; with offloaded
	 * decryption (CRYPTO_DONE) there is nothing to free.
	 */
	if (!xo || (xo && !(xo->flags & CRYPTO_DONE)))
		kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

	err = esp_remove_trailer(skb);
	if (unlikely(err < 0))
		goto out;

	iph = ip_hdr(skb);
	ihl = iph->ihl * 4;

	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		/* TCP and UDP headers share the location of the source
		 * port; which view is valid depends on encap_type.
		 */
		struct tcphdr *th = (void *)(skb_network_header(skb) + ihl);
		struct udphdr *uh = (void *)(skb_network_header(skb) + ihl);
		__be16 source;

		switch (x->encap->encap_type) {
		case TCP_ENCAP_ESPINTCP:
			source = th->source;
			break;
		case UDP_ENCAP_ESPINUDP:
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			source = uh->source;
			break;
		default:
			WARN_ON_ONCE(1);
			err = -EINVAL;
			goto out;
		}

		/*
		 * 1) if the NAT-T peer's IP or port changed then
		 *    advertize the change to the keying daemon.
		 *    This is an inbound SA, so just compare
		 *    SRC ports.
		 */
		if (iph->saddr != x->props.saddr.a4 ||
		    source != encap->encap_sport) {
			xfrm_address_t ipaddr;

			ipaddr.a4 = iph->saddr;
			km_new_mapping(x, &ipaddr, source);

			/* XXX: perhaps add an extra
			 * policy check here, to see
			 * if we should allow or
			 * reject a packet from a
			 * different source
			 * address/port.
			 */
		}

		/*
		 * 2) ignore UDP/TCP checksums in case
		 *    of NAT-T in Transport Mode, or
		 *    perform other post-processing fixes
		 *    as per draft-ietf-ipsec-udp-encaps-06,
		 *    section 3.1.2
		 */
		if (x->props.mode == XFRM_MODE_TRANSPORT)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	/* Drop the ESP header + IV so data points at the inner packet. */
	skb_pull_rcsum(skb, hlen);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -ihl);

	/* RFC4303: Drop dummy packets without any error */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}
EXPORT_SYMBOL_GPL(esp_input_done2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) static void esp_input_done(struct crypto_async_request *base, int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) struct sk_buff *skb = base->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) xfrm_input_resume(skb, esp_input_done2(skb, err));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
/* Undo the ESN header shift done by esp_input_set_header(): restore
 * the original header contents and drop the 4 extra bytes that were
 * pushed to hold the high sequence bits.
 */
static void esp_input_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, 0);
	__skb_pull(skb, 4);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
/* Prepare the ESP header for ESN decryption.
 *
 * @seqhi: scratch slot that receives the original SPI so it can be
 *         put back by esp_input_restore_header() after decryption.
 *
 * No-op unless the state uses extended sequence numbers.
 */
static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct ip_esp_hdr *esph;

	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * decryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		esph = skb_push(skb, 4);
		*seqhi = esph->spi;          /* stash original SPI */
		esph->spi = esph->seq_no;    /* shift seq_no up */
		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
/* Async completion handler for the ESN case: restore the ESP header
 * that esp_input_set_header() shifted, then finish as usual.
 */
static void esp_input_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_input_restore_header(skb);
	esp_input_done(base, err);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) * Note: detecting truncated vs. non-truncated authentication data is very
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) * expensive, so we only support truncated data, which is the recommended
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) * and common case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) */
/* xfrm input entry point for ESP: map the packet into a scatterlist,
 * set up the AEAD request and decrypt.  Post-processing (trailer
 * removal, NAT-T handling) happens in esp_input_done2(), either inline
 * or from the async crypto callback.
 *
 * Returns the inner protocol number, -EINPROGRESS for async crypto,
 * or a negative error code.
 */
static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct aead_request *req;
	struct sk_buff *trailer;
	int ivlen = crypto_aead_ivsize(aead);
	int elen = skb->len - sizeof(struct ip_esp_hdr) - ivlen;
	int nfrags;
	int assoclen;
	int seqhilen;
	__be32 *seqhi;
	void *tmp;
	u8 *iv;
	struct scatterlist *sg;
	int err = -EINVAL;

	/* ESP header + IV must be directly readable. */
	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen))
		goto out;

	if (elen <= 0)
		goto out;

	assoclen = sizeof(struct ip_esp_hdr);
	seqhilen = 0;

	/* With ESN the associated data also covers the high seq bits. */
	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	/* Avoid the copy in skb_cow_data() when the skb layout already
	 * allows in-place decryption.
	 */
	if (!skb_cloned(skb)) {
		if (!skb_is_nonlinear(skb)) {
			nfrags = 1;

			goto skip_cow;
		} else if (!skb_has_frag_list(skb)) {
			nfrags = skb_shinfo(skb)->nr_frags;
			nfrags++;

			goto skip_cow;
		}
	}

	err = skb_cow_data(skb, 0, &trailer);
	if (err < 0)
		goto out;

	nfrags = err;

skip_cow:
	err = -ENOMEM;
	/* One scratch buffer holds seqhi, IV, request and sg table. */
	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	/* For ESN this shifts the header to splice in the high bits. */
	esp_input_set_header(skb, seqhi);

	sg_init_table(sg, nfrags);
	err = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(err < 0)) {
		kfree(tmp);
		goto out;
	}

	/* Decryption invalidates any hardware checksum. */
	skb->ip_summed = CHECKSUM_NONE;

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_input_done, skb);

	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
	aead_request_set_ad(req, assoclen);

	err = crypto_aead_decrypt(req);
	if (err == -EINPROGRESS)
		goto out;

	if ((x->props.flags & XFRM_STATE_ESN))
		esp_input_restore_header(skb);

	err = esp_input_done2(skb, err);

out:
	return err;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) static int esp4_err(struct sk_buff *skb, u32 info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) struct net *net = dev_net(skb->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) const struct iphdr *iph = (const struct iphdr *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) struct xfrm_state *x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) switch (icmp_hdr(skb)->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) case ICMP_DEST_UNREACH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) case ICMP_REDIRECT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) esph->spi, IPPROTO_ESP, AF_INET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) if (!x)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) ipv4_update_pmtu(skb, net, info, 0, IPPROTO_ESP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) ipv4_redirect(skb, net, 0, IPPROTO_ESP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) xfrm_state_put(x);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) static void esp_destroy(struct xfrm_state *x)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) struct crypto_aead *aead = x->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) if (!aead)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) crypto_free_aead(aead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) static int esp_init_aead(struct xfrm_state *x)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) char aead_name[CRYPTO_MAX_ALG_NAME];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) struct crypto_aead *aead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) err = -ENAMETOOLONG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) aead = crypto_alloc_aead(aead_name, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) err = PTR_ERR(aead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) if (IS_ERR(aead))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) x->data = aead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) err = crypto_aead_setkey(aead, x->aead->alg_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) (x->aead->alg_key_len + 7) / 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) static int esp_init_authenc(struct xfrm_state *x)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) struct crypto_aead *aead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) struct crypto_authenc_key_param *param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) struct rtattr *rta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) char *key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) char *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) char authenc_name[CRYPTO_MAX_ALG_NAME];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) unsigned int keylen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) if (!x->ealg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) err = -ENAMETOOLONG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) if ((x->props.flags & XFRM_STATE_ESN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) "%s%sauthencesn(%s,%s)%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) x->geniv ?: "", x->geniv ? "(" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) x->aalg ? x->aalg->alg_name : "digest_null",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) x->ealg->alg_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) "%s%sauthenc(%s,%s)%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) x->geniv ?: "", x->geniv ? "(" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) x->aalg ? x->aalg->alg_name : "digest_null",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) x->ealg->alg_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) aead = crypto_alloc_aead(authenc_name, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) err = PTR_ERR(aead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) if (IS_ERR(aead))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) x->data = aead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) key = kmalloc(keylen, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) if (!key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) p = key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) rta = (void *)p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) rta->rta_len = RTA_LENGTH(sizeof(*param));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) param = RTA_DATA(rta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) p += RTA_SPACE(sizeof(*param));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) if (x->aalg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) struct xfrm_algo_desc *aalg_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) p += (x->aalg->alg_key_len + 7) / 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) BUG_ON(!aalg_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) crypto_aead_authsize(aead)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) pr_info("ESP: %s digestsize %u != %hu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) x->aalg->alg_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) crypto_aead_authsize(aead),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) aalg_desc->uinfo.auth.icv_fullbits / 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) goto free_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) err = crypto_aead_setauthsize(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) aead, x->aalg->alg_trunc_len / 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) goto free_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) err = crypto_aead_setkey(aead, key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) free_key:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) kfree(key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) static int esp_init_state(struct xfrm_state *x)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) struct crypto_aead *aead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) u32 align;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) x->data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) if (x->aead)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) err = esp_init_aead(x);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) err = esp_init_authenc(x);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) aead = x->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) x->props.header_len = sizeof(struct ip_esp_hdr) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) crypto_aead_ivsize(aead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) if (x->props.mode == XFRM_MODE_TUNNEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) x->props.header_len += sizeof(struct iphdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) else if (x->props.mode == XFRM_MODE_BEET && x->sel.family != AF_INET6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) x->props.header_len += IPV4_BEET_PHMAXLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) if (x->encap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) struct xfrm_encap_tmpl *encap = x->encap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) switch (encap->encap_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) case UDP_ENCAP_ESPINUDP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) x->props.header_len += sizeof(struct udphdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) case UDP_ENCAP_ESPINUDP_NON_IKE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) #ifdef CONFIG_INET_ESPINTCP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) case TCP_ENCAP_ESPINTCP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) /* only the length field, TCP encap is done by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) * the socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) x->props.header_len += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) align = ALIGN(crypto_aead_blocksize(aead), 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) static int esp4_rcv_cb(struct sk_buff *skb, int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) static const struct xfrm_type esp_type =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) .description = "ESP4",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) .proto = IPPROTO_ESP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) .flags = XFRM_TYPE_REPLAY_PROT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) .init_state = esp_init_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) .destructor = esp_destroy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) .input = esp_input,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) .output = esp_output,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) static struct xfrm4_protocol esp4_protocol = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) .handler = xfrm4_rcv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) .input_handler = xfrm_input,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) .cb_handler = esp4_rcv_cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) .err_handler = esp4_err,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) .priority = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) static int __init esp4_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) if (xfrm_register_type(&esp_type, AF_INET) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) pr_info("%s: can't add xfrm type\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) if (xfrm4_protocol_register(&esp4_protocol, IPPROTO_ESP) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) pr_info("%s: can't add protocol\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) xfrm_unregister_type(&esp_type, AF_INET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) static void __exit esp4_fini(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) if (xfrm4_protocol_deregister(&esp4_protocol, IPPROTO_ESP) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) pr_info("%s: can't remove protocol\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) xfrm_unregister_type(&esp_type, AF_INET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) module_init(esp4_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) module_exit(esp4_fini);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP);