Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/* Copyright (c) 2018, Mellanox Technologies All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/tls.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <net/ip6_checksum.h>

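/* Turn the remainder of the scatter_walk's current position into a
 * scatterlist entry and chain it to the rest of the walk, so an AEAD
 * request can continue from exactly where the walk stopped.
 */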
static void chain_to_walk(struct scatterlist *sg, struct scatter_walk *walk)
{
	struct scatterlist *src = walk->sg;
	int diff = walk->offset - src->offset;

	sg_set_page(sg, sg_page(src),
		    src->length - diff, walk->offset);

	scatterwalk_crypto_chain(sg, sg_next(src), 2);
}

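/* Software-encrypt a single TLS record at the current walk position:
 * the TLS header and explicit nonce are copied through to the output
 * unchanged, the AAD and per-record IV are rebuilt from them and the
 * record sequence number, and the payload plus tag are produced with
 * AES-GCM-128.
 */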
static int tls_enc_record(struct aead_request *aead_req,
			  struct crypto_aead *aead, char *aad,
			  char *iv, __be64 rcd_sn,
			  struct scatter_walk *in,
			  struct scatter_walk *out, int *in_len)
{
	unsigned char buf[TLS_HEADER_SIZE + TLS_CIPHER_AES_GCM_128_IV_SIZE];
	struct scatterlist sg_in[3];
	struct scatterlist sg_out[3];
	u16 len;
	int rc;

	len = min_t(int, *in_len, ARRAY_SIZE(buf));

	scatterwalk_copychunks(buf, in, len, 0);
	scatterwalk_copychunks(buf, out, len, 1);

	*in_len -= len;
	if (!*in_len)
		return 0;

	scatterwalk_pagedone(in, 0, 1);
	scatterwalk_pagedone(out, 1, 1);

	len = buf[4] | (buf[3] << 8);
	len -= TLS_CIPHER_AES_GCM_128_IV_SIZE;

	tls_make_aad(aad, len - TLS_CIPHER_AES_GCM_128_TAG_SIZE,
		(char *)&rcd_sn, sizeof(rcd_sn), buf[0],
		TLS_1_2_VERSION);

	memcpy(iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, buf + TLS_HEADER_SIZE,
	       TLS_CIPHER_AES_GCM_128_IV_SIZE);

	sg_init_table(sg_in, ARRAY_SIZE(sg_in));
	sg_init_table(sg_out, ARRAY_SIZE(sg_out));
	sg_set_buf(sg_in, aad, TLS_AAD_SPACE_SIZE);
	sg_set_buf(sg_out, aad, TLS_AAD_SPACE_SIZE);
	chain_to_walk(sg_in + 1, in);
	chain_to_walk(sg_out + 1, out);

	*in_len -= len;
	if (*in_len < 0) {
		*in_len += TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		/* The input buffer doesn't contain the entire record;
		 * trim len accordingly. The resulting authentication tag
		 * will contain garbage, but we don't care, so we won't
		 * include any of it in the output skb.
		 * Note that we assume the output buffer length
		 * is larger than input buffer length + tag size.
		 */
		if (*in_len < 0)
			len += *in_len;

		*in_len = 0;
	}

	if (*in_len) {
		scatterwalk_copychunks(NULL, in, len, 2);
		scatterwalk_pagedone(in, 0, 1);
		scatterwalk_copychunks(NULL, out, len, 2);
		scatterwalk_pagedone(out, 1, 1);
	}

	len -= TLS_CIPHER_AES_GCM_128_TAG_SIZE;
	aead_request_set_crypt(aead_req, sg_in, sg_out, len, iv);

	rc = crypto_aead_encrypt(aead_req);

	return rc;
}

static void tls_init_aead_request(struct aead_request *aead_req,
				  struct crypto_aead *aead)
{
	aead_request_set_tfm(aead_req, aead);
	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
}

static struct aead_request *tls_alloc_aead_request(struct crypto_aead *aead,
						   gfp_t flags)
{
	unsigned int req_size = sizeof(struct aead_request) +
		crypto_aead_reqsize(aead);
	struct aead_request *aead_req;

	aead_req = kzalloc(req_size, flags);
	if (aead_req)
		tls_init_aead_request(aead_req, aead);
	return aead_req;
}

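/* Encrypt record after record with tls_enc_record(), bumping the record
 * sequence number for each one, until len bytes of input have been
 * consumed or an error occurs.
 */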
static int tls_enc_records(struct aead_request *aead_req,
			   struct crypto_aead *aead, struct scatterlist *sg_in,
			   struct scatterlist *sg_out, char *aad, char *iv,
			   u64 rcd_sn, int len)
{
	struct scatter_walk out, in;
	int rc;

	scatterwalk_start(&in, sg_in);
	scatterwalk_start(&out, sg_out);

	do {
		rc = tls_enc_record(aead_req, aead, aad, iv,
				    cpu_to_be64(rcd_sn), &in, &out, &len);
		rcd_sn++;

	} while (rc == 0 && len);

	scatterwalk_done(&in, 0, 0);
	scatterwalk_done(&out, 1, 0);

	return rc;
}

/* Can't use icsk->icsk_af_ops->send_check here because the ip addresses
 * might have been changed by NAT.
 */
static void update_chksum(struct sk_buff *skb, int headln)
{
	struct tcphdr *th = tcp_hdr(skb);
	int datalen = skb->len - headln;
	const struct ipv6hdr *ipv6h;
	const struct iphdr *iph;

	/* We only changed the payload so if we are using partial we don't
	 * need to update anything.
	 */
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
		return;

	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);

	if (skb->sk->sk_family == AF_INET6) {
		ipv6h = ipv6_hdr(skb);
		th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
					     datalen, IPPROTO_TCP, 0);
	} else {
		iph = ip_hdr(skb);
		th->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, datalen,
					       IPPROTO_TCP, 0);
	}
}

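/* Copy the headers and socket ownership from the original skb into the
 * freshly encrypted copy, then fix up the TCP checksum and the socket's
 * write-memory accounting for the difference in truesize.
 */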
static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln)
{
	struct sock *sk = skb->sk;
	int delta;

	skb_copy_header(nskb, skb);

	skb_put(nskb, skb->len);
	memcpy(nskb->data, skb->data, headln);

	nskb->destructor = skb->destructor;
	nskb->sk = sk;
	skb->destructor = NULL;
	skb->sk = NULL;

	update_chksum(nskb, headln);

	/* sock_efree means skb must have gone through skb_orphan_partial() */
	if (nskb->destructor == sock_efree)
		return;

	delta = nskb->truesize - skb->truesize;
	if (likely(delta < 0))
		WARN_ON_ONCE(refcount_sub_and_test(-delta, &sk->sk_wmem_alloc));
	else if (delta)
		refcount_add(delta, &sk->sk_wmem_alloc);
}

/* This function may be called after the user socket is already
 * closed, so make sure we don't use anything freed during
 * tls_sk_proto_close here.
 */

static int fill_sg_in(struct scatterlist *sg_in,
		      struct sk_buff *skb,
		      struct tls_offload_context_tx *ctx,
		      u64 *rcd_sn,
		      s32 *sync_size,
		      int *resync_sgs)
{
	int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int payload_len = skb->len - tcp_payload_offset;
	u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
	struct tls_record_info *record;
	unsigned long flags;
	int remaining;
	int i;

	spin_lock_irqsave(&ctx->lock, flags);
	record = tls_get_record(ctx, tcp_seq, rcd_sn);
	if (!record) {
		spin_unlock_irqrestore(&ctx->lock, flags);
		return -EINVAL;
	}

	*sync_size = tcp_seq - tls_record_start_seq(record);
	if (*sync_size < 0) {
		int is_start_marker = tls_record_is_start_marker(record);

		spin_unlock_irqrestore(&ctx->lock, flags);
		/* This should only occur if the relevant record was
		 * already acked. In that case it should be ok
		 * to drop the packet and avoid retransmission.
		 *
		 * There is a corner case where the packet contains
		 * both an acked and a non-acked record.
		 * We currently don't handle that case and rely
		 * on TCP to retransmit a packet that doesn't contain
		 * already acked payload.
		 */
		if (!is_start_marker)
			*sync_size = 0;
		return -EINVAL;
	}

	remaining = *sync_size;
	for (i = 0; remaining > 0; i++) {
		skb_frag_t *frag = &record->frags[i];

		__skb_frag_ref(frag);
		sg_set_page(sg_in + i, skb_frag_page(frag),
			    skb_frag_size(frag), skb_frag_off(frag));

		remaining -= skb_frag_size(frag);

		if (remaining < 0)
			sg_in[i].length += remaining;
	}
	*resync_sgs = i;

	spin_unlock_irqrestore(&ctx->lock, flags);
	if (skb_to_sgvec(skb, &sg_in[i], tcp_payload_offset, payload_len) < 0)
		return -EINVAL;

	return 0;
}

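/* Lay out the output scatterlist: scratch space for the part of the
 * record(s) that precedes this skb's payload, the payload area of the
 * new skb, and room for the authentication tag.
 */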
static void fill_sg_out(struct scatterlist sg_out[3], void *buf,
			struct tls_context *tls_ctx,
			struct sk_buff *nskb,
			int tcp_payload_offset,
			int payload_len,
			int sync_size,
			void *dummy_buf)
{
	sg_set_buf(&sg_out[0], dummy_buf, sync_size);
	sg_set_buf(&sg_out[1], nskb->data + tcp_payload_offset, payload_len);
	/* Add room for authentication tag produced by crypto */
	dummy_buf += sync_size;
	sg_set_buf(&sg_out[2], dummy_buf, TLS_CIPHER_AES_GCM_128_TAG_SIZE);
}

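/* Allocate a new skb and the scratch buffers (salt + IV, AAD, resync
 * area) needed to software-encrypt the record(s) that cover @skb, run
 * the AEAD over them and return the finished skb, or NULL on failure.
 */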
static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
				   struct scatterlist sg_out[3],
				   struct scatterlist *sg_in,
				   struct sk_buff *skb,
				   s32 sync_size, u64 rcd_sn)
{
	int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
	int payload_len = skb->len - tcp_payload_offset;
	void *buf, *iv, *aad, *dummy_buf;
	struct aead_request *aead_req;
	struct sk_buff *nskb = NULL;
	int buf_len;

	aead_req = tls_alloc_aead_request(ctx->aead_send, GFP_ATOMIC);
	if (!aead_req)
		return NULL;

	buf_len = TLS_CIPHER_AES_GCM_128_SALT_SIZE +
		  TLS_CIPHER_AES_GCM_128_IV_SIZE +
		  TLS_AAD_SPACE_SIZE +
		  sync_size +
		  TLS_CIPHER_AES_GCM_128_TAG_SIZE;
	buf = kmalloc(buf_len, GFP_ATOMIC);
	if (!buf)
		goto free_req;

	iv = buf;
	memcpy(iv, tls_ctx->crypto_send.aes_gcm_128.salt,
	       TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	aad = buf + TLS_CIPHER_AES_GCM_128_SALT_SIZE +
	      TLS_CIPHER_AES_GCM_128_IV_SIZE;
	dummy_buf = aad + TLS_AAD_SPACE_SIZE;

	nskb = alloc_skb(skb_headroom(skb) + skb->len, GFP_ATOMIC);
	if (!nskb)
		goto free_buf;

	skb_reserve(nskb, skb_headroom(skb));

	fill_sg_out(sg_out, buf, tls_ctx, nskb, tcp_payload_offset,
		    payload_len, sync_size, dummy_buf);

	if (tls_enc_records(aead_req, ctx->aead_send, sg_in, sg_out, aad, iv,
			    rcd_sn, sync_size + payload_len) < 0)
		goto free_nskb;

	complete_skb(nskb, skb, tcp_payload_offset);

	/* validate_xmit_skb_list assumes that if the skb wasn't segmented
	 * nskb->prev will point to the skb itself
	 */
	nskb->prev = nskb;

free_buf:
	kfree(buf);
free_req:
	kfree(aead_req);
	return nskb;
free_nskb:
	kfree_skb(nskb);
	nskb = NULL;
	goto free_buf;
}

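/* Software fallback entry point: look up the TLS record(s) backing this
 * TCP segment, re-encrypt them into a fresh skb and return it. The
 * original skb is freed either way.
 */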
static struct sk_buff *tls_sw_fallback(struct sock *sk, struct sk_buff *skb)
{
	int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
	int payload_len = skb->len - tcp_payload_offset;
	struct scatterlist *sg_in, sg_out[3];
	struct sk_buff *nskb = NULL;
	int sg_in_max_elements;
	int resync_sgs = 0;
	s32 sync_size = 0;
	u64 rcd_sn;

	/* worst case is:
	 * MAX_SKB_FRAGS in tls_record_info
	 * MAX_SKB_FRAGS + 1 in SKB head and frags.
	 */
	sg_in_max_elements = 2 * MAX_SKB_FRAGS + 1;

	if (!payload_len)
		return skb;

	sg_in = kmalloc_array(sg_in_max_elements, sizeof(*sg_in), GFP_ATOMIC);
	if (!sg_in)
		goto free_orig;

	sg_init_table(sg_in, sg_in_max_elements);
	sg_init_table(sg_out, ARRAY_SIZE(sg_out));

	if (fill_sg_in(sg_in, skb, ctx, &rcd_sn, &sync_size, &resync_sgs)) {
		/* bypass packets before kernel TLS socket option was set */
		if (sync_size < 0 && payload_len <= -sync_size)
			nskb = skb_get(skb);
		goto put_sg;
	}

	nskb = tls_enc_skb(tls_ctx, sg_out, sg_in, skb, sync_size, rcd_sn);

put_sg:
	while (resync_sgs)
		put_page(sg_page(&sg_in[--resync_sgs]));
	kfree(sg_in);
free_orig:
	if (nskb)
		consume_skb(skb);
	else
		kfree_skb(skb);
	return nskb;
}

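/* Plaintext skbs leaving through the netdev the connection was offloaded
 * to are encrypted by the NIC; anything routed out through a different
 * device must be encrypted in software first.
 */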
struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
				      struct net_device *dev,
				      struct sk_buff *skb)
{
	if (dev == tls_get_ctx(sk)->netdev)
		return skb;

	return tls_sw_fallback(sk, skb);
}
EXPORT_SYMBOL_GPL(tls_validate_xmit_skb);

struct sk_buff *tls_validate_xmit_skb_sw(struct sock *sk,
					 struct net_device *dev,
					 struct sk_buff *skb)
{
	return tls_sw_fallback(sk, skb);
}

struct sk_buff *tls_encrypt_skb(struct sk_buff *skb)
{
	return tls_sw_fallback(skb->sk, skb);
}
EXPORT_SYMBOL_GPL(tls_encrypt_skb);

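/* Allocate and key the gcm(aes) transform used by the software fallback
 * path for this offloaded TX context.
 */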
int tls_sw_fallback_init(struct sock *sk,
			 struct tls_offload_context_tx *offload_ctx,
			 struct tls_crypto_info *crypto_info)
{
	const u8 *key;
	int rc;

	offload_ctx->aead_send =
	    crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(offload_ctx->aead_send)) {
		rc = PTR_ERR(offload_ctx->aead_send);
		pr_err_ratelimited("crypto_alloc_aead failed rc=%d\n", rc);
		offload_ctx->aead_send = NULL;
		goto err_out;
	}

	key = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->key;

	rc = crypto_aead_setkey(offload_ctx->aead_send, key,
				TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	if (rc)
		goto free_aead;

	rc = crypto_aead_setauthsize(offload_ctx->aead_send,
				     TLS_CIPHER_AES_GCM_128_TAG_SIZE);
	if (rc)
		goto free_aead;

	return 0;
free_aead:
	crypto_free_aead(offload_ctx->aead_send);
err_out:
	return rc;
}
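
/* For context, a minimal illustrative sketch of how the core TX path ends up
 * calling tls_validate_xmit_skb() above: it is installed as the per-socket
 * sk_validate_xmit_skb hook when TLS device offload is set up, and the
 * networking core consults that hook before handing an skb to a driver.
 * The helper name demo_validate_xmit() below is hypothetical and the body is
 * a sketch of the mainline behaviour, not code from this tree.
 */
static struct sk_buff *demo_validate_xmit(struct net_device *dev,
					  struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	/* If a validate hook is installed (e.g. tls_validate_xmit_skb), it
	 * either passes the skb through unchanged or returns a re-encrypted
	 * replacement produced by tls_sw_fallback().
	 */
	if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb)
		skb = sk->sk_validate_xmit_skb(sk, dev, skb);

	return skb;
}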