^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Copyright (c) 2018, Covalent IO, Inc. http://covalent.io
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * This software is available to you under a choice of one of two
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * licenses. You may choose to be licensed under the terms of the GNU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * General Public License (GPL) Version 2, available from the file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) * COPYING in the main directory of this source tree, or the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) * OpenIB.org BSD license below:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) * Redistribution and use in source and binary forms, with or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) * without modification, are permitted provided that the following
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) * conditions are met:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) * - Redistributions of source code must retain the above
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) * copyright notice, this list of conditions and the following
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) * disclaimer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) * - Redistributions in binary form must reproduce the above
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) * copyright notice, this list of conditions and the following
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) * disclaimer in the documentation and/or other materials
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) * provided with the distribution.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) * SOFTWARE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #include <linux/bug.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #include <linux/sched/signal.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #include <linux/splice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #include <crypto/aead.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #include <net/strparser.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #include <net/tls.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) noinline void tls_err_abort(struct sock *sk, int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) WARN_ON_ONCE(err >= 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) /* sk->sk_err should contain a positive error code. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) sk->sk_err = -err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) sk->sk_error_report(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) static int __skb_nsg(struct sk_buff *skb, int offset, int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) unsigned int recursion_level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) int start = skb_headlen(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) int i, chunk = start - offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) struct sk_buff *frag_iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) int elt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) if (unlikely(recursion_level >= 24))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) if (chunk > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) if (chunk > len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) chunk = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) elt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) len -= chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) if (len == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) return elt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) offset += chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) int end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) WARN_ON(start > offset + len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) chunk = end - offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) if (chunk > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) if (chunk > len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) chunk = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) elt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) len -= chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) if (len == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) return elt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) offset += chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) start = end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) if (unlikely(skb_has_frag_list(skb))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) skb_walk_frags(skb, frag_iter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) int end, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) WARN_ON(start > offset + len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) end = start + frag_iter->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) chunk = end - offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) if (chunk > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) if (chunk > len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) chunk = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) ret = __skb_nsg(frag_iter, offset - start, chunk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) recursion_level + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) if (unlikely(ret < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) elt += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) len -= chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) if (len == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) return elt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) offset += chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) start = end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) BUG_ON(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) return elt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) /* Return the number of scatterlist elements required to completely map the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) * skb, or -EMSGSIZE if the recursion depth is exceeded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) static int skb_nsg(struct sk_buff *skb, int offset, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) return __skb_nsg(skb, offset, len, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) static int padding_length(struct tls_sw_context_rx *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) struct tls_prot_info *prot, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) struct strp_msg *rxm = strp_msg(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) int sub = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) /* Determine zero-padding length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) if (prot->version == TLS_1_3_VERSION) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) char content_type = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) int back = 17;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) while (content_type == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) if (back > rxm->full_len - prot->prepend_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) return -EBADMSG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) err = skb_copy_bits(skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) rxm->offset + rxm->full_len - back,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) &content_type, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) if (content_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) sub++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) back++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) ctx->control = content_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) return sub;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) static void tls_decrypt_done(struct crypto_async_request *req, int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) struct aead_request *aead_req = (struct aead_request *)req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) struct scatterlist *sgout = aead_req->dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) struct scatterlist *sgin = aead_req->src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) struct tls_sw_context_rx *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) struct tls_context *tls_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) struct tls_prot_info *prot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) unsigned int pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) int pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) skb = (struct sk_buff *)req->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) tls_ctx = tls_get_ctx(skb->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) ctx = tls_sw_ctx_rx(tls_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) prot = &tls_ctx->prot_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) /* Propagate if there was an err */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) if (err == -EBADMSG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) TLS_INC_STATS(sock_net(skb->sk),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) LINUX_MIB_TLSDECRYPTERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) ctx->async_wait.err = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) tls_err_abort(skb->sk, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) struct strp_msg *rxm = strp_msg(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) int pad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) pad = padding_length(ctx, prot, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) if (pad < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) ctx->async_wait.err = pad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) tls_err_abort(skb->sk, pad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) rxm->full_len -= pad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) rxm->offset += prot->prepend_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) rxm->full_len -= prot->overhead_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) /* After using skb->sk to propagate sk through crypto async callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) * we need to NULL it again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) skb->sk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) /* Free the destination pages if skb was not decrypted inplace */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) if (sgout != sgin) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) /* Skip the first S/G entry as it points to AAD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) if (!sg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) put_page(sg_page(sg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) kfree(aead_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) spin_lock_bh(&ctx->decrypt_compl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) pending = atomic_dec_return(&ctx->decrypt_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) if (!pending && ctx->async_notify)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) complete(&ctx->async_wait.completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) spin_unlock_bh(&ctx->decrypt_compl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) static int tls_do_decryption(struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) struct scatterlist *sgin,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) struct scatterlist *sgout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) char *iv_recv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) size_t data_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) struct aead_request *aead_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) bool async)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) struct tls_context *tls_ctx = tls_get_ctx(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) struct tls_prot_info *prot = &tls_ctx->prot_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) aead_request_set_tfm(aead_req, ctx->aead_recv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) aead_request_set_ad(aead_req, prot->aad_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) aead_request_set_crypt(aead_req, sgin, sgout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) data_len + prot->tag_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) (u8 *)iv_recv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) if (async) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) /* Using skb->sk to push sk through to crypto async callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) * handler. This allows propagating errors up to the socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) * if needed. It _must_ be cleared in the async handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) * before consume_skb is called. We _know_ skb->sk is NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) * because it is a clone from strparser.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) skb->sk = sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) aead_request_set_callback(aead_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) CRYPTO_TFM_REQ_MAY_BACKLOG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) tls_decrypt_done, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) atomic_inc(&ctx->decrypt_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) aead_request_set_callback(aead_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) CRYPTO_TFM_REQ_MAY_BACKLOG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) crypto_req_done, &ctx->async_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) ret = crypto_aead_decrypt(aead_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) if (ret == -EINPROGRESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) if (async)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) ret = crypto_wait_req(ret, &ctx->async_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) if (async)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) atomic_dec(&ctx->decrypt_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) static void tls_trim_both_msgs(struct sock *sk, int target_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) struct tls_context *tls_ctx = tls_get_ctx(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) struct tls_prot_info *prot = &tls_ctx->prot_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) struct tls_rec *rec = ctx->open_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) sk_msg_trim(sk, &rec->msg_plaintext, target_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) if (target_size > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) target_size += prot->overhead_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) sk_msg_trim(sk, &rec->msg_encrypted, target_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) static int tls_alloc_encrypted_msg(struct sock *sk, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) struct tls_context *tls_ctx = tls_get_ctx(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) struct tls_rec *rec = ctx->open_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) struct sk_msg *msg_en = &rec->msg_encrypted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) return sk_msg_alloc(sk, msg_en, len, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) static int tls_clone_plaintext_msg(struct sock *sk, int required)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) struct tls_context *tls_ctx = tls_get_ctx(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) struct tls_prot_info *prot = &tls_ctx->prot_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) struct tls_rec *rec = ctx->open_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) struct sk_msg *msg_pl = &rec->msg_plaintext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) struct sk_msg *msg_en = &rec->msg_encrypted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) int skip, len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) /* We add page references worth len bytes from encrypted sg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) * at the end of plaintext sg. It is guaranteed that msg_en
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) * has enough required room (ensured by caller).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) len = required - msg_pl->sg.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) /* Skip initial bytes in msg_en's data to be able to use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) * same offset of both plain and encrypted data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) skip = prot->prepend_size + msg_pl->sg.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) static struct tls_rec *tls_get_rec(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) struct tls_context *tls_ctx = tls_get_ctx(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) struct tls_prot_info *prot = &tls_ctx->prot_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) struct sk_msg *msg_pl, *msg_en;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) struct tls_rec *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) int mem_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) rec = kzalloc(mem_size, sk->sk_allocation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) if (!rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) msg_pl = &rec->msg_plaintext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) msg_en = &rec->msg_encrypted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) sk_msg_init(msg_pl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) sk_msg_init(msg_en);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) sg_init_table(rec->sg_aead_in, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, prot->aad_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) sg_unmark_end(&rec->sg_aead_in[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) sg_init_table(rec->sg_aead_out, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, prot->aad_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) sg_unmark_end(&rec->sg_aead_out[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) return rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) static void tls_free_rec(struct sock *sk, struct tls_rec *rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) sk_msg_free(sk, &rec->msg_encrypted);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) sk_msg_free(sk, &rec->msg_plaintext);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) kfree(rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) static void tls_free_open_rec(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) struct tls_context *tls_ctx = tls_get_ctx(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) struct tls_rec *rec = ctx->open_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) if (rec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) tls_free_rec(sk, rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) ctx->open_rec = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) int tls_tx_records(struct sock *sk, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) struct tls_context *tls_ctx = tls_get_ctx(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) struct tls_rec *rec, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) struct sk_msg *msg_en;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) int tx_flags, rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) if (tls_is_partially_sent_record(tls_ctx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) rec = list_first_entry(&ctx->tx_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) struct tls_rec, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) if (flags == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) tx_flags = rec->tx_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) tx_flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) goto tx_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) /* Full record has been transmitted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) * Remove the head of tx_list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) list_del(&rec->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) sk_msg_free(sk, &rec->msg_plaintext);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) kfree(rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) /* Tx all ready records */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) if (READ_ONCE(rec->tx_ready)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) if (flags == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) tx_flags = rec->tx_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) tx_flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) msg_en = &rec->msg_encrypted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) rc = tls_push_sg(sk, tls_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) &msg_en->sg.data[msg_en->sg.curr],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) 0, tx_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) goto tx_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) list_del(&rec->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) sk_msg_free(sk, &rec->msg_plaintext);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) kfree(rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) tx_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) if (rc < 0 && rc != -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) tls_err_abort(sk, -EBADMSG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) static void tls_encrypt_done(struct crypto_async_request *req, int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) struct aead_request *aead_req = (struct aead_request *)req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) struct sock *sk = req->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) struct tls_context *tls_ctx = tls_get_ctx(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) struct tls_prot_info *prot = &tls_ctx->prot_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) struct scatterlist *sge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) struct sk_msg *msg_en;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) struct tls_rec *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) bool ready = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) int pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) rec = container_of(aead_req, struct tls_rec, aead_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) msg_en = &rec->msg_encrypted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) sge = sk_msg_elem(msg_en, msg_en->sg.curr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) sge->offset -= prot->prepend_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) sge->length += prot->prepend_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) /* Check if error is previously set on socket */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) if (err || sk->sk_err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) rec = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) /* If err is already set on socket, return the same code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) if (sk->sk_err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) ctx->async_wait.err = -sk->sk_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) ctx->async_wait.err = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) tls_err_abort(sk, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) if (rec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) struct tls_rec *first_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) /* Mark the record as ready for transmission */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) smp_store_mb(rec->tx_ready, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) /* If received record is at head of tx_list, schedule tx */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) first_rec = list_first_entry(&ctx->tx_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) struct tls_rec, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) if (rec == first_rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) ready = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) spin_lock_bh(&ctx->encrypt_compl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) pending = atomic_dec_return(&ctx->encrypt_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) if (!pending && ctx->async_notify)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) complete(&ctx->async_wait.completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) spin_unlock_bh(&ctx->encrypt_compl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) if (!ready)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) /* Schedule the transmission */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) schedule_delayed_work(&ctx->tx_work.work, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) static int tls_do_encryption(struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) struct tls_context *tls_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) struct tls_sw_context_tx *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) struct aead_request *aead_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) size_t data_len, u32 start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) struct tls_prot_info *prot = &tls_ctx->prot_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) struct tls_rec *rec = ctx->open_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) struct sk_msg *msg_en = &rec->msg_encrypted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) struct scatterlist *sge = sk_msg_elem(msg_en, start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) int rc, iv_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) /* For CCM based ciphers, first byte of IV is a constant */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) iv_offset = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) prot->iv_size + prot->salt_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) xor_iv_with_seq(prot->version, rec->iv_data + iv_offset, tls_ctx->tx.rec_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) sge->offset += prot->prepend_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) sge->length -= prot->prepend_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) msg_en->sg.curr = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) aead_request_set_tfm(aead_req, ctx->aead_send);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) aead_request_set_ad(aead_req, prot->aad_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) aead_request_set_crypt(aead_req, rec->sg_aead_in,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) rec->sg_aead_out,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) data_len, rec->iv_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) tls_encrypt_done, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) /* Add the record in tx_list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) atomic_inc(&ctx->encrypt_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) rc = crypto_aead_encrypt(aead_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) if (!rc || rc != -EINPROGRESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) atomic_dec(&ctx->encrypt_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) sge->offset -= prot->prepend_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) sge->length += prot->prepend_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) if (!rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) WRITE_ONCE(rec->tx_ready, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) } else if (rc != -EINPROGRESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) list_del(&rec->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) /* Unhook the record from context if encryption is not failure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) ctx->open_rec = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) tls_advance_record_sn(sk, prot, &tls_ctx->tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) static int tls_split_open_record(struct sock *sk, struct tls_rec *from,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) struct tls_rec **to, struct sk_msg *msg_opl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) struct sk_msg *msg_oen, u32 split_point,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) u32 tx_overhead_size, u32 *orig_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) u32 i, j, bytes = 0, apply = msg_opl->apply_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) struct scatterlist *sge, *osge, *nsge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) u32 orig_size = msg_opl->sg.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) struct scatterlist tmp = { };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) struct sk_msg *msg_npl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) struct tls_rec *new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) new = tls_get_rec(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) if (!new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) tx_overhead_size, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) tls_free_rec(sk, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) *orig_end = msg_opl->sg.end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) i = msg_opl->sg.start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) sge = sk_msg_elem(msg_opl, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) while (apply && sge->length) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) if (sge->length > apply) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) u32 len = sge->length - apply;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) get_page(sg_page(sge));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) sg_set_page(&tmp, sg_page(sge), len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) sge->offset + apply);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) sge->length = apply;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) bytes += apply;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) apply = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) apply -= sge->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) bytes += sge->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) sk_msg_iter_var_next(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) if (i == msg_opl->sg.end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) sge = sk_msg_elem(msg_opl, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) msg_opl->sg.end = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) msg_opl->sg.curr = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) msg_opl->sg.copybreak = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) msg_opl->apply_bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) msg_opl->sg.size = bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) msg_npl = &new->msg_plaintext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) msg_npl->apply_bytes = apply;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) msg_npl->sg.size = orig_size - bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) j = msg_npl->sg.start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) nsge = sk_msg_elem(msg_npl, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) if (tmp.length) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) memcpy(nsge, &tmp, sizeof(*nsge));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) sk_msg_iter_var_next(j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) nsge = sk_msg_elem(msg_npl, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) osge = sk_msg_elem(msg_opl, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) while (osge->length) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) memcpy(nsge, osge, sizeof(*nsge));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) sg_unmark_end(nsge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) sk_msg_iter_var_next(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) sk_msg_iter_var_next(j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) if (i == *orig_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) osge = sk_msg_elem(msg_opl, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) nsge = sk_msg_elem(msg_npl, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) msg_npl->sg.end = j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) msg_npl->sg.curr = j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) msg_npl->sg.copybreak = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) *to = new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) static void tls_merge_open_record(struct sock *sk, struct tls_rec *to,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) struct tls_rec *from, u32 orig_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) struct sk_msg *msg_npl = &from->msg_plaintext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) struct sk_msg *msg_opl = &to->msg_plaintext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) struct scatterlist *osge, *nsge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) u32 i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) i = msg_opl->sg.end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) sk_msg_iter_var_prev(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) j = msg_npl->sg.start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) osge = sk_msg_elem(msg_opl, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) nsge = sk_msg_elem(msg_npl, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) if (sg_page(osge) == sg_page(nsge) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) osge->offset + osge->length == nsge->offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) osge->length += nsge->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) put_page(sg_page(nsge));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) msg_opl->sg.end = orig_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) msg_opl->sg.curr = orig_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) msg_opl->sg.copybreak = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) msg_opl->sg.size += msg_npl->sg.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) sk_msg_free(sk, &to->msg_encrypted);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) kfree(from);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) static int tls_push_record(struct sock *sk, int flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) unsigned char record_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) struct tls_context *tls_ctx = tls_get_ctx(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) struct tls_prot_info *prot = &tls_ctx->prot_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) struct tls_rec *rec = ctx->open_rec, *tmp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) u32 i, split_point, orig_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) struct sk_msg *msg_pl, *msg_en;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) struct aead_request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) bool split;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) if (!rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) msg_pl = &rec->msg_plaintext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) msg_en = &rec->msg_encrypted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) split_point = msg_pl->apply_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) split = split_point && split_point < msg_pl->sg.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) if (unlikely((!split &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) msg_pl->sg.size +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) prot->overhead_size > msg_en->sg.size) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) (split &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) split_point +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) prot->overhead_size > msg_en->sg.size))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) split = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) split_point = msg_en->sg.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) if (split) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) split_point, prot->overhead_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) &orig_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) /* This can happen if above tls_split_open_record allocates
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) * a single large encryption buffer instead of two smaller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) * ones. In this case adjust pointers and continue without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) * split.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) if (!msg_pl->sg.size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) tls_merge_open_record(sk, rec, tmp, orig_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) msg_pl = &rec->msg_plaintext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) msg_en = &rec->msg_encrypted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) split = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) sk_msg_trim(sk, msg_en, msg_pl->sg.size +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) prot->overhead_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) rec->tx_flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) req = &rec->aead_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) i = msg_pl->sg.end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) sk_msg_iter_var_prev(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) rec->content_type = record_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) if (prot->version == TLS_1_3_VERSION) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) /* Add content type to end of message. No padding added */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) sg_set_buf(&rec->sg_content_type, &rec->content_type, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) sg_mark_end(&rec->sg_content_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) &rec->sg_content_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) sg_mark_end(sk_msg_elem(msg_pl, i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) if (msg_pl->sg.end < msg_pl->sg.start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) sg_chain(&msg_pl->sg.data[msg_pl->sg.start],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) MAX_SKB_FRAGS - msg_pl->sg.start + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) msg_pl->sg.data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) i = msg_pl->sg.start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) i = msg_en->sg.end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) sk_msg_iter_var_prev(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) sg_mark_end(sk_msg_elem(msg_en, i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) i = msg_en->sg.start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) tls_ctx->tx.rec_seq, prot->rec_seq_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) record_type, prot->version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) tls_fill_prepend(tls_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) page_address(sg_page(&msg_en->sg.data[i])) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) msg_en->sg.data[i].offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) msg_pl->sg.size + prot->tail_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) record_type, prot->version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) tls_ctx->pending_open_record_frags = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) rc = tls_do_encryption(sk, tls_ctx, ctx, req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) msg_pl->sg.size + prot->tail_size, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) if (rc < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) if (rc != -EINPROGRESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) tls_err_abort(sk, -EBADMSG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) if (split) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) tls_ctx->pending_open_record_frags = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) tls_merge_open_record(sk, rec, tmp, orig_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) ctx->async_capable = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) } else if (split) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) msg_pl = &tmp->msg_plaintext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) msg_en = &tmp->msg_encrypted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) tls_ctx->pending_open_record_frags = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) ctx->open_rec = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) return tls_tx_records(sk, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) bool full_record, u8 record_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) ssize_t *copied, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) struct tls_context *tls_ctx = tls_get_ctx(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) struct sk_msg msg_redir = { };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) struct sk_psock *psock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) struct sock *sk_redir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) struct tls_rec *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) bool enospc, policy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) int err = 0, send;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) u32 delta = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) policy = !(flags & MSG_SENDPAGE_NOPOLICY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) psock = sk_psock_get(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) if (!psock || !policy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) err = tls_push_record(sk, flags, record_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) if (err && sk->sk_err == EBADMSG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) *copied -= sk_msg_free(sk, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) tls_free_open_rec(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) err = -sk->sk_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) if (psock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) sk_psock_put(sk, psock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) more_data:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) enospc = sk_msg_full(msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) if (psock->eval == __SK_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) delta = msg->sg.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) psock->eval = sk_psock_msg_verdict(sk, psock, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) delta -= msg->sg.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) !enospc && !full_record) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) err = -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) msg->cork_bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) send = msg->sg.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) if (msg->apply_bytes && msg->apply_bytes < send)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) send = msg->apply_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) switch (psock->eval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) case __SK_PASS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) err = tls_push_record(sk, flags, record_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) if (err && sk->sk_err == EBADMSG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) *copied -= sk_msg_free(sk, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) tls_free_open_rec(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) err = -sk->sk_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) case __SK_REDIRECT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) sk_redir = psock->sk_redir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) memcpy(&msg_redir, msg, sizeof(*msg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) if (msg->apply_bytes < send)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) msg->apply_bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) msg->apply_bytes -= send;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) sk_msg_return_zero(sk, msg, send);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) msg->sg.size -= send;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) err = tcp_bpf_sendmsg_redir(sk_redir, &msg_redir, send, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) *copied -= sk_msg_free_nocharge(sk, &msg_redir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) msg->sg.size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) if (msg->sg.size == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) tls_free_open_rec(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) case __SK_DROP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) sk_msg_free_partial(sk, msg, send);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) if (msg->apply_bytes < send)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) msg->apply_bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) msg->apply_bytes -= send;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) if (msg->sg.size == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) tls_free_open_rec(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) *copied -= (send + delta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) err = -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) if (likely(!err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) bool reset_eval = !ctx->open_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) rec = ctx->open_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) if (rec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) msg = &rec->msg_plaintext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) if (!msg->apply_bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) reset_eval = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) if (reset_eval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) psock->eval = __SK_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) if (psock->sk_redir) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) sock_put(psock->sk_redir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) psock->sk_redir = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) if (rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) goto more_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) sk_psock_put(sk, psock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) static int tls_sw_push_pending_record(struct sock *sk, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) struct tls_context *tls_ctx = tls_get_ctx(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) struct tls_rec *rec = ctx->open_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) struct sk_msg *msg_pl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) size_t copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) if (!rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) msg_pl = &rec->msg_plaintext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) copied = msg_pl->sg.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) if (!copied)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) &copied, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) struct tls_context *tls_ctx = tls_get_ctx(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) struct tls_prot_info *prot = &tls_ctx->prot_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) bool async_capable = ctx->async_capable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) unsigned char record_type = TLS_RECORD_TYPE_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) bool eor = !(msg->msg_flags & MSG_MORE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) size_t try_to_copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) ssize_t copied = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) struct sk_msg *msg_pl, *msg_en;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) struct tls_rec *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) int required_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) int num_async = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) bool full_record;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) int record_room;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) int num_zc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) int orig_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) int pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) MSG_CMSG_COMPAT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) mutex_lock(&tls_ctx->tx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) if (unlikely(msg->msg_controllen)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) ret = tls_proccess_cmsg(sk, msg, &record_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) if (ret == -EINPROGRESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) num_async++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) else if (ret != -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) goto send_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) while (msg_data_left(msg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) if (sk->sk_err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) ret = -sk->sk_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) goto send_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) if (ctx->open_rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) rec = ctx->open_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) rec = ctx->open_rec = tls_get_rec(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) if (!rec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) goto send_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) msg_pl = &rec->msg_plaintext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) msg_en = &rec->msg_encrypted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) orig_size = msg_pl->sg.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) full_record = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) try_to_copy = msg_data_left(msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) if (try_to_copy >= record_room) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) try_to_copy = record_room;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) full_record = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) required_size = msg_pl->sg.size + try_to_copy +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) prot->overhead_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) if (!sk_stream_memory_free(sk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) goto wait_for_sndbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) alloc_encrypted:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) ret = tls_alloc_encrypted_msg(sk, required_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) if (ret != -ENOSPC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) goto wait_for_memory;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) /* Adjust try_to_copy according to the amount that was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) * actually allocated. The difference is due
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) * to max sg elements limit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) try_to_copy -= required_size - msg_en->sg.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) full_record = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) if (!is_kvec && (full_record || eor) && !async_capable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) u32 first = msg_pl->sg.end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) msg_pl, try_to_copy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) goto fallback_to_reg_send;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) num_zc++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) copied += try_to_copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) sk_msg_sg_copy_set(msg_pl, first);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) record_type, &copied,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) msg->msg_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) if (ret == -EINPROGRESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) num_async++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) else if (ret == -ENOMEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) goto wait_for_memory;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) else if (ctx->open_rec && ret == -ENOSPC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) goto rollback_iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) else if (ret != -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) goto send_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) rollback_iter:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) copied -= try_to_copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) sk_msg_sg_copy_clear(msg_pl, first);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) iov_iter_revert(&msg->msg_iter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) msg_pl->sg.size - orig_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) fallback_to_reg_send:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) sk_msg_trim(sk, msg_pl, orig_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) required_size = msg_pl->sg.size + try_to_copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) ret = tls_clone_plaintext_msg(sk, required_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) if (ret != -ENOSPC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) goto send_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) /* Adjust try_to_copy according to the amount that was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) * actually allocated. The difference is due
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) * to max sg elements limit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) try_to_copy -= required_size - msg_pl->sg.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) full_record = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) sk_msg_trim(sk, msg_en,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) msg_pl->sg.size + prot->overhead_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) if (try_to_copy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) msg_pl, try_to_copy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) goto trim_sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) /* Open records defined only if successfully copied, otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) * we would trim the sg but not reset the open record frags.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) tls_ctx->pending_open_record_frags = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) copied += try_to_copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) if (full_record || eor) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) record_type, &copied,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) msg->msg_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) if (ret == -EINPROGRESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) num_async++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) else if (ret == -ENOMEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) goto wait_for_memory;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) else if (ret != -EAGAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) if (ret == -ENOSPC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) goto send_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) wait_for_sndbuf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) wait_for_memory:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) ret = sk_stream_wait_memory(sk, &timeo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) trim_sgl:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) if (ctx->open_rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) tls_trim_both_msgs(sk, orig_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) goto send_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) if (ctx->open_rec && msg_en->sg.size < required_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) goto alloc_encrypted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) if (!num_async) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) goto send_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) } else if (num_zc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) /* Wait for pending encryptions to get completed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) spin_lock_bh(&ctx->encrypt_compl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) ctx->async_notify = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) pending = atomic_read(&ctx->encrypt_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) spin_unlock_bh(&ctx->encrypt_compl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) if (pending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) reinit_completion(&ctx->async_wait.completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) /* There can be no concurrent accesses, since we have no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) * pending encrypt operations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) WRITE_ONCE(ctx->async_notify, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) if (ctx->async_wait.err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) ret = ctx->async_wait.err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) copied = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) /* Transmit if any encryptions have completed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) cancel_delayed_work(&ctx->tx_work.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) tls_tx_records(sk, msg->msg_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) send_end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) ret = sk_stream_error(sk, msg->msg_flags, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) mutex_unlock(&tls_ctx->tx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) return copied > 0 ? copied : ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) int offset, size_t size, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) struct tls_context *tls_ctx = tls_get_ctx(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) struct tls_prot_info *prot = &tls_ctx->prot_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) unsigned char record_type = TLS_RECORD_TYPE_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) struct sk_msg *msg_pl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) struct tls_rec *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) int num_async = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) ssize_t copied = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) bool full_record;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) int record_room;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) bool eor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) eor = !(flags & MSG_SENDPAGE_NOTLAST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) /* Call the sk_stream functions to manage the sndbuf mem. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) while (size > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) size_t copy, required_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) if (sk->sk_err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) ret = -sk->sk_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) goto sendpage_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) if (ctx->open_rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) rec = ctx->open_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) rec = ctx->open_rec = tls_get_rec(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) if (!rec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) goto sendpage_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) msg_pl = &rec->msg_plaintext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) full_record = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) copy = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) if (copy >= record_room) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) copy = record_room;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) full_record = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) required_size = msg_pl->sg.size + copy + prot->overhead_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) if (!sk_stream_memory_free(sk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) goto wait_for_sndbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) alloc_payload:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) ret = tls_alloc_encrypted_msg(sk, required_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) if (ret != -ENOSPC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) goto wait_for_memory;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) /* Adjust copy according to the amount that was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) * actually allocated. The difference is due
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) * to max sg elements limit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) copy -= required_size - msg_pl->sg.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) full_record = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) sk_msg_page_add(msg_pl, page, copy, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) sk_mem_charge(sk, copy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) offset += copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) size -= copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) copied += copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) tls_ctx->pending_open_record_frags = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) if (full_record || eor || sk_msg_full(msg_pl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) record_type, &copied, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) if (ret == -EINPROGRESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) num_async++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) else if (ret == -ENOMEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) goto wait_for_memory;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) else if (ret != -EAGAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) if (ret == -ENOSPC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) goto sendpage_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) wait_for_sndbuf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) wait_for_memory:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) ret = sk_stream_wait_memory(sk, &timeo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) if (ctx->open_rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) tls_trim_both_msgs(sk, msg_pl->sg.size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) goto sendpage_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) if (ctx->open_rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) goto alloc_payload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) if (num_async) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) /* Transmit if any encryptions have completed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) cancel_delayed_work(&ctx->tx_work.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) tls_tx_records(sk, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) sendpage_end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) ret = sk_stream_error(sk, flags, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) return copied > 0 ? copied : ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) int offset, size_t size, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) MSG_NO_SHARED_FRAGS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) return tls_sw_do_sendpage(sk, page, offset, size, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) int tls_sw_sendpage(struct sock *sk, struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) int offset, size_t size, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) struct tls_context *tls_ctx = tls_get_ctx(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) mutex_lock(&tls_ctx->tx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) ret = tls_sw_do_sendpage(sk, page, offset, size, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) mutex_unlock(&tls_ctx->tx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) bool nonblock, long timeo, int *err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) struct tls_context *tls_ctx = tls_get_ctx(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) DEFINE_WAIT_FUNC(wait, woken_wake_function);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) while (!(skb = ctx->recv_pkt) && sk_psock_queue_empty(psock)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) if (sk->sk_err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) *err = sock_error(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) if (!skb_queue_empty(&sk->sk_receive_queue)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) __strp_unpause(&ctx->strp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) if (ctx->recv_pkt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) return ctx->recv_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) if (sk->sk_shutdown & RCV_SHUTDOWN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) if (sock_flag(sk, SOCK_DONE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) if (nonblock || !timeo) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) *err = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) add_wait_queue(sk_sleep(sk), &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) sk_wait_event(sk, &timeo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) ctx->recv_pkt != skb ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) !sk_psock_queue_empty(psock),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) remove_wait_queue(sk_sleep(sk), &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) /* Handle signals */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) if (signal_pending(current)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) *err = sock_intr_errno(timeo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) static int tls_setup_from_iter(struct sock *sk, struct iov_iter *from,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) int length, int *pages_used,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) unsigned int *size_used,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) struct scatterlist *to,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) int to_max_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) int rc = 0, i = 0, num_elem = *pages_used, maxpages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) struct page *pages[MAX_SKB_FRAGS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) unsigned int size = *size_used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) ssize_t copied, use;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) size_t offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) while (length > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) maxpages = to_max_pages - num_elem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) if (maxpages == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) rc = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) copied = iov_iter_get_pages(from, pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) maxpages, &offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) if (copied <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) rc = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) iov_iter_advance(from, copied);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) length -= copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) size += copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) while (copied) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) use = min_t(int, copied, PAGE_SIZE - offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) sg_set_page(&to[num_elem],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) pages[i], use, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) sg_unmark_end(&to[num_elem]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) /* We do not uncharge memory from this API */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) copied -= use;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) num_elem++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) /* Mark the end in the last sg entry if newly added */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) if (num_elem > *pages_used)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) sg_mark_end(&to[num_elem - 1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) iov_iter_revert(from, size - *size_used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) *size_used = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) *pages_used = num_elem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) /* This function decrypts the input skb into either out_iov or in out_sg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) * or in skb buffers itself. The input parameter 'zc' indicates if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) * zero-copy mode needs to be tried or not. With zero-copy mode, either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) * out_iov or out_sg must be non-NULL. In case both out_iov and out_sg are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) * NULL, then the decryption happens inside skb buffers itself, i.e.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) * zero-copy gets disabled and 'zc' is updated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) struct iov_iter *out_iov,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) struct scatterlist *out_sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) int *chunk, bool *zc, bool async)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) struct tls_context *tls_ctx = tls_get_ctx(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) struct tls_prot_info *prot = &tls_ctx->prot_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) struct strp_msg *rxm = strp_msg(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) int n_sgin, n_sgout, nsg, mem_size, aead_size, err, pages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) struct aead_request *aead_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) struct sk_buff *unused;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) u8 *aad, *iv, *mem = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) struct scatterlist *sgin = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) struct scatterlist *sgout = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) const int data_len = rxm->full_len - prot->overhead_size +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) prot->tail_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) int iv_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) if (*zc && (out_iov || out_sg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) if (out_iov)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) n_sgout = iov_iter_npages(out_iov, INT_MAX) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) n_sgout = sg_nents(out_sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) rxm->full_len - prot->prepend_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) n_sgout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) *zc = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) n_sgin = skb_cow_data(skb, 0, &unused);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) if (n_sgin < 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) return -EBADMSG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) /* Increment to accommodate AAD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) n_sgin = n_sgin + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) nsg = n_sgin + n_sgout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) mem_size = aead_size + (nsg * sizeof(struct scatterlist));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) mem_size = mem_size + prot->aad_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) mem_size = mem_size + crypto_aead_ivsize(ctx->aead_recv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) /* Allocate a single block of memory which contains
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) * aead_req || sgin[] || sgout[] || aad || iv.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) * This order achieves correct alignment for aead_req, sgin, sgout.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) mem = kmalloc(mem_size, sk->sk_allocation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) if (!mem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) /* Segment the allocated memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) aead_req = (struct aead_request *)mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) sgin = (struct scatterlist *)(mem + aead_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) sgout = sgin + n_sgin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) aad = (u8 *)(sgout + n_sgout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) iv = aad + prot->aad_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) /* For CCM based ciphers, first byte of nonce+iv is always '2' */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) iv[0] = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) iv_offset = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) /* Prepare IV */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) iv + iv_offset + prot->salt_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) prot->iv_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) kfree(mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) if (prot->version == TLS_1_3_VERSION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) memcpy(iv + iv_offset, tls_ctx->rx.iv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) crypto_aead_ivsize(ctx->aead_recv));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) memcpy(iv + iv_offset, tls_ctx->rx.iv, prot->salt_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) xor_iv_with_seq(prot->version, iv + iv_offset, tls_ctx->rx.rec_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) /* Prepare AAD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) tls_make_aad(aad, rxm->full_len - prot->overhead_size +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) prot->tail_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) tls_ctx->rx.rec_seq, prot->rec_seq_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) ctx->control, prot->version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) /* Prepare sgin */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) sg_init_table(sgin, n_sgin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) sg_set_buf(&sgin[0], aad, prot->aad_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) err = skb_to_sgvec(skb, &sgin[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) rxm->offset + prot->prepend_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) rxm->full_len - prot->prepend_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) kfree(mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) if (n_sgout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) if (out_iov) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) sg_init_table(sgout, n_sgout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) sg_set_buf(&sgout[0], aad, prot->aad_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) *chunk = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) err = tls_setup_from_iter(sk, out_iov, data_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) &pages, chunk, &sgout[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) (n_sgout - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) goto fallback_to_reg_recv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) } else if (out_sg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) goto fallback_to_reg_recv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) fallback_to_reg_recv:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) sgout = sgin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) pages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) *chunk = data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) *zc = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) /* Prepare and submit AEAD request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) err = tls_do_decryption(sk, skb, sgin, sgout, iv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) data_len, aead_req, async);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) if (err == -EINPROGRESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) /* Release the pages in case iov was mapped to pages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) for (; pages > 0; pages--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) put_page(sg_page(&sgout[pages]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) kfree(mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) struct iov_iter *dest, int *chunk, bool *zc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) bool async)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) struct tls_context *tls_ctx = tls_get_ctx(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) struct tls_prot_info *prot = &tls_ctx->prot_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) struct strp_msg *rxm = strp_msg(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) int pad, err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) if (!ctx->decrypted) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) if (tls_ctx->rx_conf == TLS_HW) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) err = tls_device_decrypted(sk, tls_ctx, skb, rxm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) /* Still not decrypted after tls_device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) if (!ctx->decrypted) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) err = decrypt_internal(sk, skb, dest, NULL, chunk, zc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) async);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) if (err == -EINPROGRESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) tls_advance_record_sn(sk, prot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) &tls_ctx->rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) else if (err == -EBADMSG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) TLS_INC_STATS(sock_net(sk),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) LINUX_MIB_TLSDECRYPTERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) *zc = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) pad = padding_length(ctx, prot, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) if (pad < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) return pad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) rxm->full_len -= pad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) rxm->offset += prot->prepend_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) rxm->full_len -= prot->overhead_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) tls_advance_record_sn(sk, prot, &tls_ctx->rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) ctx->decrypted = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) ctx->saved_data_ready(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) *zc = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) int decrypt_skb(struct sock *sk, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) struct scatterlist *sgout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) bool zc = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) int chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) return decrypt_internal(sk, skb, NULL, sgout, &chunk, &zc, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) struct tls_context *tls_ctx = tls_get_ctx(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) struct strp_msg *rxm = strp_msg(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) if (len < rxm->full_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) rxm->offset += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) rxm->full_len -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) consume_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) /* Finished with message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) ctx->recv_pkt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) __strp_unpause(&ctx->strp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) /* This function traverses the rx_list in tls receive context to copies the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) * decrypted records into the buffer provided by caller zero copy is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) * true. Further, the records are removed from the rx_list if it is not a peek
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) * case and the record has been consumed completely.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) static int process_rx_list(struct tls_sw_context_rx *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) struct msghdr *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) u8 *control,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) bool *cmsg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) size_t skip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) size_t len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) bool zc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) bool is_peek)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) struct sk_buff *skb = skb_peek(&ctx->rx_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) u8 ctrl = *control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) u8 msgc = *cmsg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) struct tls_msg *tlm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) ssize_t copied = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) /* Set the record type in 'control' if caller didn't pass it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) if (!ctrl && skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) tlm = tls_msg(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) ctrl = tlm->control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) while (skip && skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) struct strp_msg *rxm = strp_msg(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) tlm = tls_msg(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) /* Cannot process a record of different type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) if (ctrl != tlm->control)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) if (skip < rxm->full_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) skip = skip - rxm->full_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) skb = skb_peek_next(skb, &ctx->rx_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) while (len && skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) struct sk_buff *next_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) struct strp_msg *rxm = strp_msg(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) int chunk = min_t(unsigned int, rxm->full_len - skip, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) tlm = tls_msg(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) /* Cannot process a record of different type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) if (ctrl != tlm->control)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) /* Set record type if not already done. For a non-data record,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) * do not proceed if record type could not be copied.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) if (!msgc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) int cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) sizeof(ctrl), &ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) msgc = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) if (ctrl != TLS_RECORD_TYPE_DATA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) if (cerr || msg->msg_flags & MSG_CTRUNC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) *cmsg = msgc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) if (!zc || (rxm->full_len - skip) > len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) int err = skb_copy_datagram_msg(skb, rxm->offset + skip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) msg, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) len = len - chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) copied = copied + chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) /* Consume the data from record if it is non-peek case*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) if (!is_peek) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) rxm->offset = rxm->offset + chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) rxm->full_len = rxm->full_len - chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) /* Return if there is unconsumed data in the record */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) if (rxm->full_len - skip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) /* The remaining skip-bytes must lie in 1st record in rx_list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) * So from the 2nd record, 'skip' should be 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) skip = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) if (msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) msg->msg_flags |= MSG_EOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) next_skb = skb_peek_next(skb, &ctx->rx_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) if (!is_peek) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) skb_unlink(skb, &ctx->rx_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) consume_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) skb = next_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) *control = ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) return copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) int tls_sw_recvmsg(struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) struct msghdr *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) size_t len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) int nonblock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) int flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) int *addr_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) struct tls_context *tls_ctx = tls_get_ctx(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) struct tls_prot_info *prot = &tls_ctx->prot_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) struct sk_psock *psock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) unsigned char control = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) ssize_t decrypted = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) struct strp_msg *rxm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) struct tls_msg *tlm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) ssize_t copied = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) bool cmsg = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) int target, err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) long timeo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) bool is_peek = flags & MSG_PEEK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) bool bpf_strp_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) int num_async = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) int pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) flags |= nonblock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) if (unlikely(flags & MSG_ERRQUEUE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) psock = sk_psock_get(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) bpf_strp_enabled = sk_psock_strp_enabled(psock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) /* Process pending decrypted records. It must be non-zero-copy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) err = process_rx_list(ctx, msg, &control, &cmsg, 0, len, false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) is_peek);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) tls_err_abort(sk, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) copied = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) if (len <= copied)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) goto recv_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) len = len - copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) while (len && (decrypted + copied < target || ctx->recv_pkt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) bool retain_skb = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) bool zc = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) int to_decrypt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) int chunk = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) bool async_capable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) bool async = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) skb = tls_wait_data(sk, psock, flags & MSG_DONTWAIT, timeo, &err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) if (psock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) int ret = __tcp_bpf_recvmsg(sk, psock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) msg, len, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) if (ret > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) decrypted += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) len -= ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) goto recv_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) tlm = tls_msg(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) if (prot->version == TLS_1_3_VERSION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) tlm->control = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) tlm->control = ctx->control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) rxm = strp_msg(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) to_decrypt = rxm->full_len - prot->overhead_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) if (to_decrypt <= len && !is_kvec && !is_peek &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) ctx->control == TLS_RECORD_TYPE_DATA &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) prot->version != TLS_1_3_VERSION &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) !bpf_strp_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) zc = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) /* Do not use async mode if record is non-data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) if (ctx->control == TLS_RECORD_TYPE_DATA && !bpf_strp_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) async_capable = ctx->async_capable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) async_capable = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) err = decrypt_skb_update(sk, skb, &msg->msg_iter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) &chunk, &zc, async_capable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) if (err < 0 && err != -EINPROGRESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) tls_err_abort(sk, -EBADMSG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) goto recv_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) if (err == -EINPROGRESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) async = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) num_async++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) } else if (prot->version == TLS_1_3_VERSION) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) tlm->control = ctx->control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) /* If the type of records being processed is not known yet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) * set it to record type just dequeued. If it is already known,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) * but does not match the record type just dequeued, go to end.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) * We always get record type here since for tls1.2, record type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) * is known just after record is dequeued from stream parser.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) * For tls1.3, we disable async.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) if (!control)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) control = tlm->control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) else if (control != tlm->control)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) goto recv_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) if (!cmsg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) int cerr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) sizeof(control), &control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) cmsg = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) if (control != TLS_RECORD_TYPE_DATA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) if (cerr || msg->msg_flags & MSG_CTRUNC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) goto recv_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) if (async)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) goto pick_next_record;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) if (!zc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) if (bpf_strp_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) err = sk_psock_tls_strp_read(psock, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) if (err != __SK_PASS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) rxm->offset = rxm->offset + rxm->full_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) rxm->full_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) if (err == __SK_DROP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) consume_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) ctx->recv_pkt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) __strp_unpause(&ctx->strp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) if (rxm->full_len > len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) retain_skb = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) chunk = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) chunk = rxm->full_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) err = skb_copy_datagram_msg(skb, rxm->offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) msg, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) goto recv_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) if (!is_peek) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) rxm->offset = rxm->offset + chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) rxm->full_len = rxm->full_len - chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) pick_next_record:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) if (chunk > len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) chunk = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) decrypted += chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) len -= chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) /* For async or peek case, queue the current skb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) if (async || is_peek || retain_skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) skb_queue_tail(&ctx->rx_list, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) if (tls_sw_advance_skb(sk, skb, chunk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) /* Return full control message to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) * userspace before trying to parse
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) * another message type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) msg->msg_flags |= MSG_EOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) if (control != TLS_RECORD_TYPE_DATA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) goto recv_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) recv_end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) if (num_async) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) /* Wait for all previously submitted records to be decrypted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) spin_lock_bh(&ctx->decrypt_compl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) ctx->async_notify = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) pending = atomic_read(&ctx->decrypt_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) spin_unlock_bh(&ctx->decrypt_compl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) if (pending) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) /* one of async decrypt failed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) tls_err_abort(sk, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) copied = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) decrypted = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) reinit_completion(&ctx->async_wait.completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) /* There can be no concurrent accesses, since we have no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) * pending decrypt operations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) WRITE_ONCE(ctx->async_notify, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) /* Drain records from the rx_list & copy if required */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) if (is_peek || is_kvec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) err = process_rx_list(ctx, msg, &control, &cmsg, copied,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) decrypted, false, is_peek);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) err = process_rx_list(ctx, msg, &control, &cmsg, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) decrypted, true, is_peek);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) tls_err_abort(sk, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) copied = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) copied += decrypted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) if (psock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) sk_psock_put(sk, psock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) return copied ? : err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) struct pipe_inode_info *pipe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) size_t len, unsigned int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) struct strp_msg *rxm = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) ssize_t copied = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) long timeo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) int chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) bool zc = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) timeo = sock_rcvtimeo(sk, flags & SPLICE_F_NONBLOCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) skb = tls_wait_data(sk, NULL, flags & SPLICE_F_NONBLOCK, timeo, &err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) goto splice_read_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) tls_err_abort(sk, -EBADMSG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) goto splice_read_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) /* splice does not support reading control messages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) if (ctx->control != TLS_RECORD_TYPE_DATA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) goto splice_read_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) rxm = strp_msg(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) chunk = min_t(unsigned int, rxm->full_len, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) if (copied < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) goto splice_read_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) if (likely(!(flags & MSG_PEEK)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) tls_sw_advance_skb(sk, skb, copied);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) splice_read_end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) return copied ? : err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) bool tls_sw_stream_read(const struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) struct tls_context *tls_ctx = tls_get_ctx(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) bool ingress_empty = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) struct sk_psock *psock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) psock = sk_psock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) if (psock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) ingress_empty = list_empty(&psock->ingress_msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) return !ingress_empty || ctx->recv_pkt ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) !skb_queue_empty(&ctx->rx_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) struct tls_prot_info *prot = &tls_ctx->prot_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) struct strp_msg *rxm = strp_msg(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) size_t cipher_overhead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) size_t data_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) /* Verify that we have a full TLS header, or wait for more data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) if (rxm->offset + prot->prepend_size > skb->len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) /* Sanity-check size of on-stack buffer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) if (WARN_ON(prot->prepend_size > sizeof(header))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) goto read_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) /* Linearize header to local buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) ret = skb_copy_bits(skb, rxm->offset, header, prot->prepend_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) goto read_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) ctx->control = header[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) data_len = ((header[4] & 0xFF) | (header[3] << 8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) cipher_overhead = prot->tag_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) if (prot->version != TLS_1_3_VERSION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) cipher_overhead += prot->iv_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) prot->tail_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) ret = -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) goto read_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) if (data_len < cipher_overhead) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) ret = -EBADMSG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) goto read_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) /* Note that both TLS1.3 and TLS1.2 use TLS_1_2 version here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) if (header[1] != TLS_1_2_VERSION_MINOR ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) header[2] != TLS_1_2_VERSION_MAJOR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) goto read_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) TCP_SKB_CB(skb)->seq + rxm->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) return data_len + TLS_HEADER_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) read_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) tls_err_abort(strp->sk, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) static void tls_queue(struct strparser *strp, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) ctx->decrypted = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) ctx->recv_pkt = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) strp_pause(strp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) ctx->saved_data_ready(strp->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) static void tls_data_ready(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) struct tls_context *tls_ctx = tls_get_ctx(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) struct sk_psock *psock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) strp_data_ready(&ctx->strp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) psock = sk_psock_get(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) if (psock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) if (!list_empty(&psock->ingress_msg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) ctx->saved_data_ready(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) sk_psock_put(sk, psock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) void tls_sw_cancel_work_tx(struct tls_context *tls_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) cancel_delayed_work_sync(&ctx->tx_work.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) void tls_sw_release_resources_tx(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) struct tls_context *tls_ctx = tls_get_ctx(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) struct tls_rec *rec, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) int pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) /* Wait for any pending async encryptions to complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) spin_lock_bh(&ctx->encrypt_compl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) ctx->async_notify = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) pending = atomic_read(&ctx->encrypt_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) spin_unlock_bh(&ctx->encrypt_compl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) if (pending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) tls_tx_records(sk, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) /* Free up un-sent records in tx_list. First, free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) * the partially sent record if any at head of tx_list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) if (tls_ctx->partially_sent_record) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) tls_free_partial_record(sk, tls_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) rec = list_first_entry(&ctx->tx_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) struct tls_rec, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) list_del(&rec->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) sk_msg_free(sk, &rec->msg_plaintext);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) kfree(rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) list_del(&rec->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) sk_msg_free(sk, &rec->msg_encrypted);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) sk_msg_free(sk, &rec->msg_plaintext);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) kfree(rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) crypto_free_aead(ctx->aead_send);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) tls_free_open_rec(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) void tls_sw_free_ctx_tx(struct tls_context *tls_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) kfree(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) void tls_sw_release_resources_rx(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) struct tls_context *tls_ctx = tls_get_ctx(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) kfree(tls_ctx->rx.rec_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) kfree(tls_ctx->rx.iv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) if (ctx->aead_recv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) kfree_skb(ctx->recv_pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) ctx->recv_pkt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) skb_queue_purge(&ctx->rx_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) crypto_free_aead(ctx->aead_recv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) strp_stop(&ctx->strp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) /* If tls_sw_strparser_arm() was not called (cleanup paths)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) * we still want to strp_stop(), but sk->sk_data_ready was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) * never swapped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) if (ctx->saved_data_ready) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) write_lock_bh(&sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) sk->sk_data_ready = ctx->saved_data_ready;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) write_unlock_bh(&sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) void tls_sw_strparser_done(struct tls_context *tls_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) strp_done(&ctx->strp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) void tls_sw_free_ctx_rx(struct tls_context *tls_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) kfree(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) void tls_sw_free_resources_rx(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) struct tls_context *tls_ctx = tls_get_ctx(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) tls_sw_release_resources_rx(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) tls_sw_free_ctx_rx(tls_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) /* The work handler to transmitt the encrypted records in tx_list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) static void tx_work_handler(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) struct delayed_work *delayed_work = to_delayed_work(work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) struct tx_work *tx_work = container_of(delayed_work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) struct tx_work, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) struct sock *sk = tx_work->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) struct tls_context *tls_ctx = tls_get_ctx(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) struct tls_sw_context_tx *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) if (unlikely(!tls_ctx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) ctx = tls_sw_ctx_tx(tls_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) mutex_lock(&tls_ctx->tx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) tls_tx_records(sk, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) mutex_unlock(&tls_ctx->tx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) /* Schedule the transmission if tx list is ready */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) if (is_tx_ready(tx_ctx) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) schedule_delayed_work(&tx_ctx->tx_work.work, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) write_lock_bh(&sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) rx_ctx->saved_data_ready = sk->sk_data_ready;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) sk->sk_data_ready = tls_data_ready;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) write_unlock_bh(&sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) strp_check_rcv(&rx_ctx->strp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) struct tls_context *tls_ctx = tls_get_ctx(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) struct tls_prot_info *prot = &tls_ctx->prot_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) struct tls_crypto_info *crypto_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) struct tls12_crypto_info_aes_gcm_256 *gcm_256_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) struct tls12_crypto_info_aes_ccm_128 *ccm_128_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) struct tls_sw_context_tx *sw_ctx_tx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) struct tls_sw_context_rx *sw_ctx_rx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) struct cipher_context *cctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) struct crypto_aead **aead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) struct strp_callbacks cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) u16 nonce_size, tag_size, iv_size, rec_seq_size, salt_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) struct crypto_tfm *tfm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) char *iv, *rec_seq, *key, *salt, *cipher_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) size_t keysize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) if (!ctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) if (tx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) if (!ctx->priv_ctx_tx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) if (!sw_ctx_tx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) ctx->priv_ctx_tx = sw_ctx_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) sw_ctx_tx =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) (struct tls_sw_context_tx *)ctx->priv_ctx_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) if (!ctx->priv_ctx_rx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) if (!sw_ctx_rx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) ctx->priv_ctx_rx = sw_ctx_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) sw_ctx_rx =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) (struct tls_sw_context_rx *)ctx->priv_ctx_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) if (tx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) crypto_init_wait(&sw_ctx_tx->async_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) spin_lock_init(&sw_ctx_tx->encrypt_compl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) crypto_info = &ctx->crypto_send.info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) cctx = &ctx->tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) aead = &sw_ctx_tx->aead_send;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) sw_ctx_tx->tx_work.sk = sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) crypto_init_wait(&sw_ctx_rx->async_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) spin_lock_init(&sw_ctx_rx->decrypt_compl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) crypto_info = &ctx->crypto_recv.info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) cctx = &ctx->rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) skb_queue_head_init(&sw_ctx_rx->rx_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) aead = &sw_ctx_rx->aead_recv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) switch (crypto_info->cipher_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) case TLS_CIPHER_AES_GCM_128: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) rec_seq =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) gcm_128_info =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) keysize = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) key = gcm_128_info->key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) salt = gcm_128_info->salt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) cipher_name = "gcm(aes)";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) case TLS_CIPHER_AES_GCM_256: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) nonce_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) tag_size = TLS_CIPHER_AES_GCM_256_TAG_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) iv_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) iv = ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->iv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) rec_seq_size = TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) rec_seq =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->rec_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) gcm_256_info =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) (struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) keysize = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) key = gcm_256_info->key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) salt = gcm_256_info->salt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) salt_size = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) cipher_name = "gcm(aes)";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) case TLS_CIPHER_AES_CCM_128: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) nonce_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) tag_size = TLS_CIPHER_AES_CCM_128_TAG_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) iv_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) iv = ((struct tls12_crypto_info_aes_ccm_128 *)crypto_info)->iv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) rec_seq_size = TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) rec_seq =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) ((struct tls12_crypto_info_aes_ccm_128 *)crypto_info)->rec_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) ccm_128_info =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) (struct tls12_crypto_info_aes_ccm_128 *)crypto_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) keysize = TLS_CIPHER_AES_CCM_128_KEY_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) key = ccm_128_info->key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) salt = ccm_128_info->salt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) salt_size = TLS_CIPHER_AES_CCM_128_SALT_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) cipher_name = "ccm(aes)";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) goto free_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) /* Sanity-check the sizes for stack allocations. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) rec_seq_size > TLS_MAX_REC_SEQ_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) goto free_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) if (crypto_info->version == TLS_1_3_VERSION) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) nonce_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) prot->aad_size = TLS_HEADER_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) prot->tail_size = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) prot->aad_size = TLS_AAD_SPACE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) prot->tail_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) prot->version = crypto_info->version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) prot->cipher_type = crypto_info->cipher_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) prot->tag_size = tag_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) prot->overhead_size = prot->prepend_size +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) prot->tag_size + prot->tail_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) prot->iv_size = iv_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) prot->salt_size = salt_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) cctx->iv = kmalloc(iv_size + salt_size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) if (!cctx->iv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) goto free_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) /* Note: 128 & 256 bit salt are the same size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) prot->rec_seq_size = rec_seq_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) memcpy(cctx->iv, salt, salt_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) memcpy(cctx->iv + salt_size, iv, iv_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) if (!cctx->rec_seq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) goto free_iv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) if (!*aead) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) *aead = crypto_alloc_aead(cipher_name, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) if (IS_ERR(*aead)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) rc = PTR_ERR(*aead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) *aead = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) goto free_rec_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) ctx->push_pending_record = tls_sw_push_pending_record;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) rc = crypto_aead_setkey(*aead, key, keysize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) goto free_aead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) rc = crypto_aead_setauthsize(*aead, prot->tag_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) goto free_aead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) if (sw_ctx_rx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) if (crypto_info->version == TLS_1_3_VERSION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) sw_ctx_rx->async_capable = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) sw_ctx_rx->async_capable =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) !!(tfm->__crt_alg->cra_flags &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) CRYPTO_ALG_ASYNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) /* Set up strparser */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) memset(&cb, 0, sizeof(cb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) cb.rcv_msg = tls_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) cb.parse_msg = tls_read_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) strp_init(&sw_ctx_rx->strp, sk, &cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) free_aead:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) crypto_free_aead(*aead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) *aead = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) free_rec_seq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) kfree(cctx->rec_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) cctx->rec_seq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) free_iv:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) kfree(cctx->iv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) cctx->iv = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) free_priv:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) if (tx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) kfree(ctx->priv_ctx_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) ctx->priv_ctx_tx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) kfree(ctx->priv_ctx_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) ctx->priv_ctx_rx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) }