/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _TLS_OFFLOAD_H
#define _TLS_OFFLOAD_H

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/crypto.h>
#include <linux/socket.h>
#include <linux/tcp.h>
#include <linux/skmsg.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/android_kabi.h>

#include <net/net_namespace.h>
#include <net/tcp.h>
#include <net/strparser.h>
#include <crypto/aead.h>
#include <uapi/linux/tls.h>


/* Maximum data size carried in a TLS record */
#define TLS_MAX_PAYLOAD_SIZE		((size_t)1 << 14)

#define TLS_HEADER_SIZE			5
#define TLS_NONCE_OFFSET		TLS_HEADER_SIZE

#define TLS_CRYPTO_INFO_READY(info)	((info)->cipher_type)

#define TLS_RECORD_TYPE_DATA		0x17

#define TLS_AAD_SPACE_SIZE		13

#define MAX_IV_SIZE			16
#define TLS_MAX_REC_SEQ_SIZE		8

/* For AES-CCM, the full 16 bytes of IV are made of 4 fields of the given
 * sizes:
 *
 * IV[16] = b0[1] || implicit nonce[4] || explicit nonce[8] || length[3]
 *
 * The width of the 'length' field is encoded in 'b0' as '(length width - 1)'.
 * Hence b0 contains (3 - 1) = 2.
 */
#define TLS_AES_CCM_IV_B0_BYTE		2
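
/* Illustrative sketch only (not used anywhere in the TLS core; the helper
 * name and signature are made up for the example): assembling the AES-CCM
 * IV described above from a 4-byte implicit nonce (salt) and an 8-byte
 * explicit nonce.
 */
static inline void tls_example_build_ccm_iv(u8 iv[MAX_IV_SIZE],
					    const u8 *salt,
					    const u8 *explicit_nonce)
{
	iv[0] = TLS_AES_CCM_IV_B0_BYTE;		/* length width (3) - 1 = 2 */
	memcpy(iv + 1, salt, 4);		/* implicit nonce */
	memcpy(iv + 5, explicit_nonce, 8);	/* explicit nonce */
	/* iv[13..15] carry the 3-byte length, filled in by the AEAD code */
}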

#define __TLS_INC_STATS(net, field)				\
	__SNMP_INC_STATS((net)->mib.tls_statistics, field)
#define TLS_INC_STATS(net, field)				\
	SNMP_INC_STATS((net)->mib.tls_statistics, field)
#define __TLS_DEC_STATS(net, field)				\
	__SNMP_DEC_STATS((net)->mib.tls_statistics, field)
#define TLS_DEC_STATS(net, field)				\
	SNMP_DEC_STATS((net)->mib.tls_statistics, field)

enum {
	TLS_BASE,
	TLS_SW,
	TLS_HW,
	TLS_HW_RECORD,
	TLS_NUM_CONFIG,
};

/* TLS records are maintained in 'struct tls_rec'. It stores the memory pages
 * allocated or mapped for each TLS record. After encryption, the records are
 * stored in a linked list.
 */
struct tls_rec {
	struct list_head list;
	int tx_ready;
	int tx_flags;

	struct sk_msg msg_plaintext;
	struct sk_msg msg_encrypted;

	/* AAD | msg_plaintext.sg.data | sg_tag */
	struct scatterlist sg_aead_in[2];
	/* AAD | msg_encrypted.sg.data (data contains overhead for hdr & iv & tag) */
	struct scatterlist sg_aead_out[2];

	char content_type;
	struct scatterlist sg_content_type;

	char aad_space[TLS_AAD_SPACE_SIZE];
	u8 iv_data[MAX_IV_SIZE];
	struct aead_request aead_req;

	ANDROID_KABI_RESERVE(1);

	u8 aead_req_ctx[];
};

struct tls_msg {
	struct strp_msg rxm;
	u8 control;
};

struct tx_work {
	struct delayed_work work;
	struct sock *sk;
};

struct tls_sw_context_tx {
	struct crypto_aead *aead_send;
	struct crypto_wait async_wait;
	struct tx_work tx_work;
	struct tls_rec *open_rec;
	struct list_head tx_list;
	atomic_t encrypt_pending;
	/* protect crypto_wait with encrypt_pending */
	spinlock_t encrypt_compl_lock;
	int async_notify;
	u8 async_capable:1;

#define BIT_TX_SCHEDULED	0
#define BIT_TX_CLOSING		1
	unsigned long tx_bitmask;

	ANDROID_KABI_RESERVE(1);
};

struct tls_sw_context_rx {
	struct crypto_aead *aead_recv;
	struct crypto_wait async_wait;
	struct strparser strp;
	struct sk_buff_head rx_list;	/* list of decrypted 'data' records */
	void (*saved_data_ready)(struct sock *sk);

	struct sk_buff *recv_pkt;
	u8 control;
	u8 async_capable:1;
	u8 decrypted:1;
	atomic_t decrypt_pending;
	/* protect crypto_wait with decrypt_pending */
	spinlock_t decrypt_compl_lock;
	bool async_notify;

	ANDROID_KABI_RESERVE(1);
};

struct tls_record_info {
	struct list_head list;
	u32 end_seq;
	int len;
	int num_frags;
	skb_frag_t frags[MAX_SKB_FRAGS];
};

struct tls_offload_context_tx {
	struct crypto_aead *aead_send;
	spinlock_t lock;	/* protects records list */
	struct list_head records_list;
	struct tls_record_info *open_record;
	struct tls_record_info *retransmit_hint;
	u64 hint_record_sn;
	u64 unacked_record_sn;

	struct scatterlist sg_tx_data[MAX_SKB_FRAGS];
	void (*sk_destruct)(struct sock *sk);
	u8 driver_state[] __aligned(8);
	/* The TLS layer reserves room for driver-specific state.
	 * Currently the belief is that there is not enough driver-specific
	 * state to justify another layer of indirection.
	 */
#define TLS_DRIVER_STATE_SIZE_TX	16
};

#define TLS_OFFLOAD_CONTEXT_SIZE_TX					\
	(sizeof(struct tls_offload_context_tx) + TLS_DRIVER_STATE_SIZE_TX)

enum tls_context_flags {
	/* tls_device_down was called after the netdev went down, device state
	 * was released, and kTLS works in software, even though rx_conf is
	 * still TLS_HW (needed for transition).
	 */
	TLS_RX_DEV_DEGRADED = 0,
	/* Unlike RX, where resync is driven entirely by the core, in TX only
	 * the driver knows when things went out of sync, so we need the flag
	 * to be atomic.
	 */
	TLS_TX_SYNC_SCHED = 1,
	/* tls_dev_del was called for the RX side, device state was released,
	 * but tls_ctx->netdev might still be kept, because TX-side driver
	 * resources might not be released yet. Used to prevent the second
	 * tls_dev_del call in tls_device_down if it happens simultaneously.
	 */
	TLS_RX_DEV_CLOSED = 2,
};

struct cipher_context {
	char *iv;
	char *rec_seq;
};

union tls_crypto_context {
	struct tls_crypto_info info;
	union {
		struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
		struct tls12_crypto_info_aes_gcm_256 aes_gcm_256;
	};
};

struct tls_prot_info {
	u16 version;
	u16 cipher_type;
	u16 prepend_size;
	u16 tag_size;
	u16 overhead_size;
	u16 iv_size;
	u16 salt_size;
	u16 rec_seq_size;
	u16 aad_size;
	u16 tail_size;
};

struct tls_context {
	/* read-only cache line */
	struct tls_prot_info prot_info;

	u8 tx_conf:3;
	u8 rx_conf:3;

	int (*push_pending_record)(struct sock *sk, int flags);
	void (*sk_write_space)(struct sock *sk);

	void *priv_ctx_tx;
	void *priv_ctx_rx;

	struct net_device *netdev;

	/* rw cache line */
	struct cipher_context tx;
	struct cipher_context rx;

	struct scatterlist *partially_sent_record;
	u16 partially_sent_offset;

	bool in_tcp_sendpages;
	bool pending_open_record_frags;

	struct mutex tx_lock; /* protects partially_sent_* fields and
			       * per-type TX fields
			       */
	unsigned long flags;

	/* cache cold stuff */
	struct proto *sk_proto;
	struct sock *sk;

	void (*sk_destruct)(struct sock *sk);

	union tls_crypto_context crypto_send;
	union tls_crypto_context crypto_recv;

	struct list_head list;
	refcount_t refcount;
	struct rcu_head rcu;
};

enum tls_offload_ctx_dir {
	TLS_OFFLOAD_CTX_DIR_RX,
	TLS_OFFLOAD_CTX_DIR_TX,
};

struct tlsdev_ops {
	int (*tls_dev_add)(struct net_device *netdev, struct sock *sk,
			   enum tls_offload_ctx_dir direction,
			   struct tls_crypto_info *crypto_info,
			   u32 start_offload_tcp_sn);
	void (*tls_dev_del)(struct net_device *netdev,
			    struct tls_context *ctx,
			    enum tls_offload_ctx_dir direction);
	int (*tls_dev_resync)(struct net_device *netdev,
			      struct sock *sk, u32 seq, u8 *rcd_sn,
			      enum tls_offload_ctx_dir direction);

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
	ANDROID_KABI_RESERVE(3);
	ANDROID_KABI_RESERVE(4);

};

enum tls_offload_sync_type {
	TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ = 0,
	TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT = 1,
	TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC = 2,
};

#define TLS_DEVICE_RESYNC_NH_START_IVAL		2
#define TLS_DEVICE_RESYNC_NH_MAX_IVAL		128

#define TLS_DEVICE_RESYNC_ASYNC_LOGMAX		13
struct tls_offload_resync_async {
	atomic64_t req;
	u16 loglen;
	u16 rcd_delta;
	u32 log[TLS_DEVICE_RESYNC_ASYNC_LOGMAX];
};

struct tls_offload_context_rx {
	/* sw must be the first member of tls_offload_context_rx */
	struct tls_sw_context_rx sw;
	enum tls_offload_sync_type resync_type;
	/* this member is set regardless of resync_type, to avoid branches */
	u8 resync_nh_reset:1;
	/* CORE_NEXT_HINT-only member, but use the hole here */
	u8 resync_nh_do_now:1;
	union {
		/* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ */
		struct {
			atomic64_t resync_req;
		};
		/* TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT */
		struct {
			u32 decrypted_failed;
			u32 decrypted_tgt;
		} resync_nh;
		/* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC */
		struct {
			struct tls_offload_resync_async *resync_async;
		};
	};
	u8 driver_state[] __aligned(8);
	/* The TLS layer reserves room for driver-specific state.
	 * Currently the belief is that there is not enough driver-specific
	 * state to justify another layer of indirection.
	 */
#define TLS_DRIVER_STATE_SIZE_RX	8
};

#define TLS_OFFLOAD_CONTEXT_SIZE_RX					\
	(sizeof(struct tls_offload_context_rx) + TLS_DRIVER_STATE_SIZE_RX)

struct tls_context *tls_ctx_create(struct sock *sk);
void tls_ctx_free(struct sock *sk, struct tls_context *ctx);
void update_sk_prot(struct sock *sk, struct tls_context *ctx);

int wait_on_pending_writer(struct sock *sk, long *timeo);
int tls_sk_query(struct sock *sk, int optname, char __user *optval,
		 int __user *optlen);
int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
		  unsigned int optlen);
void tls_err_abort(struct sock *sk, int err);

int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx);
void tls_sw_strparser_done(struct tls_context *tls_ctx);
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
			   int offset, size_t size, int flags);
int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags);
void tls_sw_cancel_work_tx(struct tls_context *tls_ctx);
void tls_sw_release_resources_tx(struct sock *sk);
void tls_sw_free_ctx_tx(struct tls_context *tls_ctx);
void tls_sw_free_resources_rx(struct sock *sk);
void tls_sw_release_resources_rx(struct sock *sk);
void tls_sw_free_ctx_rx(struct tls_context *tls_ctx);
int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		   int nonblock, int flags, int *addr_len);
bool tls_sw_stream_read(const struct sock *sk);
ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags);

int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tls_device_sendpage(struct sock *sk, struct page *page,
			int offset, size_t size, int flags);
int tls_tx_records(struct sock *sk, int flags);

struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
				       u32 seq, u64 *p_record_sn);

static inline bool tls_record_is_start_marker(struct tls_record_info *rec)
{
	return rec->len == 0;
}

static inline u32 tls_record_start_seq(struct tls_record_info *rec)
{
	return rec->end_seq - rec->len;
}

int tls_push_sg(struct sock *sk, struct tls_context *ctx,
		struct scatterlist *sg, u16 first_offset,
		int flags);
int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
			    int flags);
void tls_free_partial_record(struct sock *sk, struct tls_context *ctx);

static inline struct tls_msg *tls_msg(struct sk_buff *skb)
{
	return (struct tls_msg *)strp_msg(skb);
}

static inline bool tls_is_partially_sent_record(struct tls_context *ctx)
{
	return !!ctx->partially_sent_record;
}

static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
{
	return tls_ctx->pending_open_record_frags;
}

static inline bool is_tx_ready(struct tls_sw_context_tx *ctx)
{
	struct tls_rec *rec;

	rec = list_first_entry(&ctx->tx_list, struct tls_rec, list);
	if (!rec)
		return false;

	return READ_ONCE(rec->tx_ready);
}

static inline u16 tls_user_config(struct tls_context *ctx, bool tx)
{
	u16 config = tx ? ctx->tx_conf : ctx->rx_conf;

	switch (config) {
	case TLS_BASE:
		return TLS_CONF_BASE;
	case TLS_SW:
		return TLS_CONF_SW;
	case TLS_HW:
		return TLS_CONF_HW;
	case TLS_HW_RECORD:
		return TLS_CONF_HW_RECORD;
	}
	return 0;
}

struct sk_buff *
tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
		      struct sk_buff *skb);
struct sk_buff *
tls_validate_xmit_skb_sw(struct sock *sk, struct net_device *dev,
			 struct sk_buff *skb);

static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
{
#ifdef CONFIG_SOCK_VALIDATE_XMIT
	return sk_fullsock(sk) &&
	       (smp_load_acquire(&sk->sk_validate_xmit_skb) ==
	       &tls_validate_xmit_skb);
#else
	return false;
#endif
}

static inline bool tls_bigint_increment(unsigned char *seq, int len)
{
	int i;

	for (i = len - 1; i >= 0; i--) {
		++seq[i];
		if (seq[i] != 0)
			break;
	}

	return (i == -1);
}

static inline void tls_bigint_subtract(unsigned char *seq, int n)
{
	u64 rcd_sn;
	__be64 *p;

	BUILD_BUG_ON(TLS_MAX_REC_SEQ_SIZE != 8);

	p = (__be64 *)seq;
	rcd_sn = be64_to_cpu(*p);
	*p = cpu_to_be64(rcd_sn - n);
}

static inline struct tls_context *tls_get_ctx(const struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	/* Use RCU on icsk_ulp_data only for sock diag code,
	 * TLS data path doesn't need rcu_dereference().
	 */
	return (__force void *)icsk->icsk_ulp_data;
}

static inline void tls_advance_record_sn(struct sock *sk,
					 struct tls_prot_info *prot,
					 struct cipher_context *ctx)
{
	if (tls_bigint_increment(ctx->rec_seq, prot->rec_seq_size))
		tls_err_abort(sk, -EBADMSG);

	if (prot->version != TLS_1_3_VERSION)
		tls_bigint_increment(ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
				     prot->iv_size);
}

static inline void tls_fill_prepend(struct tls_context *ctx,
				    char *buf,
				    size_t plaintext_len,
				    unsigned char record_type,
				    int version)
{
	struct tls_prot_info *prot = &ctx->prot_info;
	size_t pkt_len, iv_size = prot->iv_size;

	pkt_len = plaintext_len + prot->tag_size;
	if (version != TLS_1_3_VERSION) {
		pkt_len += iv_size;

		memcpy(buf + TLS_NONCE_OFFSET,
		       ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv_size);
	}

	/* we cover the explicit nonce here as well, so buf should be of
	 * size KTLS_DTLS_HEADER_SIZE + KTLS_DTLS_NONCE_EXPLICIT_SIZE
	 */
	buf[0] = version == TLS_1_3_VERSION ?
		   TLS_RECORD_TYPE_DATA : record_type;
	/* Note that VERSION must be TLS_1_2 for both TLS1.2 and TLS1.3 */
	buf[1] = TLS_1_2_VERSION_MINOR;
	buf[2] = TLS_1_2_VERSION_MAJOR;
	/* we can use the IV for the explicit nonce according to the spec */
	buf[3] = pkt_len >> 8;
	buf[4] = pkt_len & 0xFF;
}

static inline void tls_make_aad(char *buf,
				size_t size,
				char *record_sequence,
				int record_sequence_size,
				unsigned char record_type,
				int version)
{
	if (version != TLS_1_3_VERSION) {
		memcpy(buf, record_sequence, record_sequence_size);
		buf += 8;
	} else {
		size += TLS_CIPHER_AES_GCM_128_TAG_SIZE;
	}

	buf[0] = version == TLS_1_3_VERSION ?
		  TLS_RECORD_TYPE_DATA : record_type;
	buf[1] = TLS_1_2_VERSION_MAJOR;
	buf[2] = TLS_1_2_VERSION_MINOR;
	buf[3] = size >> 8;
	buf[4] = size & 0xFF;
}
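
/* Worked illustration (derived from tls_make_aad() above, not a definition
 * used elsewhere): the AAD laid out in the TLS_AAD_SPACE_SIZE (13) byte
 * buffer is
 *
 *	TLS 1.2: rec_seq[8] || type[1] || 0x03 0x03 || length[2]
 *	TLS 1.3: type[1] || 0x03 0x03 || length[2]	(length includes the tag)
 *
 * For TLS 1.3 only the 5 leading bytes are used; the record sequence number
 * feeds the per-record nonce instead (see xor_iv_with_seq() below).
 */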

static inline void xor_iv_with_seq(int version, char *iv, char *seq)
{
	int i;

	if (version == TLS_1_3_VERSION) {
		for (i = 0; i < 8; i++)
			iv[i + 4] ^= seq[i];
	}
}


static inline struct tls_sw_context_rx *tls_sw_ctx_rx(
		const struct tls_context *tls_ctx)
{
	return (struct tls_sw_context_rx *)tls_ctx->priv_ctx_rx;
}

static inline struct tls_sw_context_tx *tls_sw_ctx_tx(
		const struct tls_context *tls_ctx)
{
	return (struct tls_sw_context_tx *)tls_ctx->priv_ctx_tx;
}

static inline struct tls_offload_context_tx *
tls_offload_ctx_tx(const struct tls_context *tls_ctx)
{
	return (struct tls_offload_context_tx *)tls_ctx->priv_ctx_tx;
}

static inline bool tls_sw_has_ctx_tx(const struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (!ctx)
		return false;
	return !!tls_sw_ctx_tx(ctx);
}

static inline bool tls_sw_has_ctx_rx(const struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (!ctx)
		return false;
	return !!tls_sw_ctx_rx(ctx);
}

void tls_sw_write_space(struct sock *sk, struct tls_context *ctx);
void tls_device_write_space(struct sock *sk, struct tls_context *ctx);

static inline struct tls_offload_context_rx *
tls_offload_ctx_rx(const struct tls_context *tls_ctx)
{
	return (struct tls_offload_context_rx *)tls_ctx->priv_ctx_rx;
}

#if IS_ENABLED(CONFIG_TLS_DEVICE)
static inline void *__tls_driver_ctx(struct tls_context *tls_ctx,
				     enum tls_offload_ctx_dir direction)
{
	if (direction == TLS_OFFLOAD_CTX_DIR_TX)
		return tls_offload_ctx_tx(tls_ctx)->driver_state;
	else
		return tls_offload_ctx_rx(tls_ctx)->driver_state;
}

static inline void *
tls_driver_ctx(const struct sock *sk, enum tls_offload_ctx_dir direction)
{
	return __tls_driver_ctx(tls_get_ctx(sk), direction);
}
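
/* Illustrative sketch only (hypothetical driver code, not part of this API):
 * a driver could keep a small per-connection struct in the driver_state area
 * reserved above and fetch it through tls_driver_ctx(). The struct and helper
 * below are made up for the example; real driver state must fit within
 * TLS_DRIVER_STATE_SIZE_TX / TLS_DRIVER_STATE_SIZE_RX bytes.
 */
struct tls_example_driver_state {
	u32 flow_id;
};

static inline struct tls_example_driver_state *
tls_example_driver_state(const struct sock *sk, enum tls_offload_ctx_dir dir)
{
	BUILD_BUG_ON(sizeof(struct tls_example_driver_state) >
		     TLS_DRIVER_STATE_SIZE_RX);
	return tls_driver_ctx(sk, dir);
}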
#endif

#define RESYNC_REQ BIT(0)
#define RESYNC_REQ_ASYNC BIT(1)
/* The TLS context is valid until sk_destruct is called */
static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

	atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | RESYNC_REQ);
}

/* Log all TLS record header TCP sequences in [seq, seq+len] */
static inline void
tls_offload_rx_resync_async_request_start(struct sock *sk, __be32 seq, u16 len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

	atomic64_set(&rx_ctx->resync_async->req, ((u64)ntohl(seq) << 32) |
		     ((u64)len << 16) | RESYNC_REQ | RESYNC_REQ_ASYNC);
	rx_ctx->resync_async->loglen = 0;
	rx_ctx->resync_async->rcd_delta = 0;
}

static inline void
tls_offload_rx_resync_async_request_end(struct sock *sk, __be32 seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

	atomic64_set(&rx_ctx->resync_async->req,
		     ((u64)ntohl(seq) << 32) | RESYNC_REQ);
}
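
/* Layout of the packed resync request word set above (illustrative summary,
 * derived from the helpers in this file):
 *
 *	bits 63..32	TCP sequence of the record header
 *	bits 31..16	length, async requests only
 *	bit   1		RESYNC_REQ_ASYNC
 *	bit   0		RESYNC_REQ
 */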

static inline void
tls_offload_rx_resync_set_type(struct sock *sk, enum tls_offload_sync_type type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	tls_offload_ctx_rx(tls_ctx)->resync_type = type;
}

/* Driver's seq tracking has to be disabled until resync succeeds */
static inline bool tls_offload_tx_resync_pending(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	bool ret;

	ret = test_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
	smp_mb__after_atomic();
	return ret;
}

int __net_init tls_proc_init(struct net *net);
void __net_exit tls_proc_fini(struct net *net);

int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
		      unsigned char *record_type);
int decrypt_skb(struct sock *sk, struct sk_buff *skb,
		struct scatterlist *sgout);
struct sk_buff *tls_encrypt_skb(struct sk_buff *skb);

int tls_sw_fallback_init(struct sock *sk,
			 struct tls_offload_context_tx *offload_ctx,
			 struct tls_crypto_info *crypto_info);

#ifdef CONFIG_TLS_DEVICE
void tls_device_init(void);
void tls_device_cleanup(void);
void tls_device_sk_destruct(struct sock *sk);
int tls_set_device_offload(struct sock *sk, struct tls_context *ctx);
void tls_device_free_resources_tx(struct sock *sk);
int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);
void tls_device_offload_cleanup_rx(struct sock *sk);
void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq);
void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq);
int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
			 struct sk_buff *skb, struct strp_msg *rxm);

static inline bool tls_is_sk_rx_device_offloaded(struct sock *sk)
{
	if (!sk_fullsock(sk) ||
	    smp_load_acquire(&sk->sk_destruct) != tls_device_sk_destruct)
		return false;
	return tls_get_ctx(sk)->rx_conf == TLS_HW;
}
#else
static inline void tls_device_init(void) {}
static inline void tls_device_cleanup(void) {}

static inline int
tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
{
	return -EOPNOTSUPP;
}

static inline void tls_device_free_resources_tx(struct sock *sk) {}

static inline int
tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
{
	return -EOPNOTSUPP;
}

static inline void tls_device_offload_cleanup_rx(struct sock *sk) {}
static inline void
tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq) {}

static inline int
tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
		     struct sk_buff *skb, struct strp_msg *rxm)
{
	return 0;
}
#endif
#endif /* _TLS_OFFLOAD_H */