/* Copyright (c) 2018, Mellanox Technologies All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <crypto/aead.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <net/dst.h>
#include <net/inet_connection_sock.h>
#include <net/tcp.h>
#include <net/tls.h>

#include "trace.h"

/* device_offload_lock is used to synchronize tls_dev_add
 * against NETDEV_DOWN notifications.
 */
static DECLARE_RWSEM(device_offload_lock);

static void tls_device_gc_task(struct work_struct *work);

static DECLARE_WORK(tls_device_gc_work, tls_device_gc_task);
static LIST_HEAD(tls_device_gc_list);
static LIST_HEAD(tls_device_list);
static LIST_HEAD(tls_device_down_list);
static DEFINE_SPINLOCK(tls_device_lock);
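
/* tls_device_lock protects tls_device_list, tls_device_down_list and
 * tls_device_gc_list. It is taken with spin_lock_irqsave() so that
 * contexts can be queued for destruction from any context; the actual
 * freeing happens later, from tls_device_gc_work, where the driver's
 * tls_dev_del() callback is invoked in process context.
 */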

static void tls_device_free_ctx(struct tls_context *ctx)
{
	if (ctx->tx_conf == TLS_HW) {
		kfree(tls_offload_ctx_tx(ctx));
		kfree(ctx->tx.rec_seq);
		kfree(ctx->tx.iv);
	}

	if (ctx->rx_conf == TLS_HW)
		kfree(tls_offload_ctx_rx(ctx));

	tls_ctx_free(NULL, ctx);
}

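/* Runs from the global workqueue: detach each queued context from its
 * net_device, dropping the context's device reference, and free it.
 * TX contexts additionally need a tls_dev_del() call so the device
 * can release its per-connection offload state.
 */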
static void tls_device_gc_task(struct work_struct *work)
{
	struct tls_context *ctx, *tmp;
	unsigned long flags;
	LIST_HEAD(gc_list);

	spin_lock_irqsave(&tls_device_lock, flags);
	list_splice_init(&tls_device_gc_list, &gc_list);
	spin_unlock_irqrestore(&tls_device_lock, flags);

	list_for_each_entry_safe(ctx, tmp, &gc_list, list) {
		struct net_device *netdev = ctx->netdev;

		if (netdev && ctx->tx_conf == TLS_HW) {
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_TX);
			dev_put(netdev);
			ctx->netdev = NULL;
		}

		list_del(&ctx->list);
		tls_device_free_ctx(ctx);
	}
}

static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&tls_device_lock, flags);
	list_move_tail(&ctx->list, &tls_device_gc_list);

	/* schedule_work inside the spinlock
	 * to make sure tls_device_down waits for that work.
	 */
	schedule_work(&tls_device_gc_work);

	spin_unlock_irqrestore(&tls_device_lock, flags);
}

/* We assume that the socket is already connected */
static struct net_device *get_netdev_for_sock(struct sock *sk)
{
	struct dst_entry *dst = sk_dst_get(sk);
	struct net_device *netdev = NULL;

	if (likely(dst)) {
		netdev = dst->dev;
		dev_hold(netdev);
	}

	dst_release(dst);

	return netdev;
}

static void destroy_record(struct tls_record_info *record)
{
	int i;

	for (i = 0; i < record->num_frags; i++)
		__skb_frag_unref(&record->frags[i]);
	kfree(record);
}

static void delete_all_records(struct tls_offload_context_tx *offload_ctx)
{
	struct tls_record_info *info, *temp;

	list_for_each_entry_safe(info, temp, &offload_ctx->records_list, list) {
		list_del(&info->list);
		destroy_record(info);
	}

	offload_ctx->retransmit_hint = NULL;
}

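/* TCP ack-processing hook (registered via clean_acked_data_enable()
 * during offload setup; the matching disable is in
 * tls_device_sk_destruct() below). Data up to acked_seq can never be
 * retransmitted, so the records covering it, and their page frags,
 * can be released; unacked_record_sn advances accordingly.
 */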
static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_record_info *info, *temp;
	struct tls_offload_context_tx *ctx;
	u64 deleted_records = 0;
	unsigned long flags;

	if (!tls_ctx)
		return;

	ctx = tls_offload_ctx_tx(tls_ctx);

	spin_lock_irqsave(&ctx->lock, flags);
	info = ctx->retransmit_hint;
	if (info && !before(acked_seq, info->end_seq))
		ctx->retransmit_hint = NULL;

	list_for_each_entry_safe(info, temp, &ctx->records_list, list) {
		if (before(acked_seq, info->end_seq))
			break;
		list_del(&info->list);

		destroy_record(info);
		deleted_records++;
	}

	ctx->unacked_record_sn += deleted_records;
	spin_unlock_irqrestore(&ctx->lock, flags);
}

/* At this point, there should be no references on this
 * socket and no in-flight SKBs associated with this
 * socket, so it is safe to free all the resources.
 */
void tls_device_sk_destruct(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);

	tls_ctx->sk_destruct(sk);

	if (tls_ctx->tx_conf == TLS_HW) {
		if (ctx->open_record)
			destroy_record(ctx->open_record);
		delete_all_records(ctx);
		crypto_free_aead(ctx->aead_send);
		clean_acked_data_disable(inet_csk(sk));
	}

	if (refcount_dec_and_test(&tls_ctx->refcount))
		tls_device_queue_ctx_destruction(tls_ctx);
}
EXPORT_SYMBOL_GPL(tls_device_sk_destruct);

void tls_device_free_resources_tx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	tls_free_partial_record(sk, tls_ctx);
}

void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	trace_tls_device_tx_resync_req(sk, got_seq, exp_seq);
	WARN_ON(test_and_set_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags));
}
EXPORT_SYMBOL_GPL(tls_offload_tx_resync_request);

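/* Push a TX resync to the device: mark the tail skb with EOR so the
 * next record's payload starts on a fresh skb, then hand the driver
 * the TCP sequence / record sequence pair to resynchronize to.
 * Taking device_offload_lock for reading keeps ctx->netdev stable
 * against NETDEV_DOWN while the driver callback runs.
 */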
static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,
				 u32 seq)
{
	struct net_device *netdev;
	struct sk_buff *skb;
	int err = 0;
	u8 *rcd_sn;

	skb = tcp_write_queue_tail(sk);
	if (skb)
		TCP_SKB_CB(skb)->eor = 1;

	rcd_sn = tls_ctx->tx.rec_seq;

	trace_tls_device_tx_resync_send(sk, seq, rcd_sn);
	down_read(&device_offload_lock);
	netdev = tls_ctx->netdev;
	if (netdev)
		err = netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq,
							 rcd_sn,
							 TLS_OFFLOAD_CTX_DIR_TX);
	up_read(&device_offload_lock);
	if (err)
		return;

	clear_bit_unlock(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
}

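/* Append @size bytes that were just written at pfrag->offset to the
 * open record: extend the record's last frag when the new bytes are
 * contiguous with it in the same page, otherwise start a new frag
 * and take an extra page reference.
 */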
static void tls_append_frag(struct tls_record_info *record,
			    struct page_frag *pfrag,
			    int size)
{
	skb_frag_t *frag;

	frag = &record->frags[record->num_frags - 1];
	if (skb_frag_page(frag) == pfrag->page &&
	    skb_frag_off(frag) + skb_frag_size(frag) == pfrag->offset) {
		skb_frag_size_add(frag, size);
	} else {
		++frag;
		__skb_frag_set_page(frag, pfrag->page);
		skb_frag_off_set(frag, pfrag->offset);
		skb_frag_size_set(frag, size);
		++record->num_frags;
		get_page(pfrag->page);
	}

	pfrag->offset += size;
	record->len += size;
}

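/* Close the open record and hand it to the TCP layer: fix up end_seq,
 * publish the record on records_list (RCU readers in tls_get_record()
 * may see it immediately), advance the record sequence number, then
 * build the scatterlist and send it via tls_push_sg(). Each frag gets
 * an extra page reference because the record keeps its frags around
 * for retransmission until the data is acked.
 */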
static int tls_push_record(struct sock *sk,
			   struct tls_context *ctx,
			   struct tls_offload_context_tx *offload_ctx,
			   struct tls_record_info *record,
			   int flags)
{
	struct tls_prot_info *prot = &ctx->prot_info;
	struct tcp_sock *tp = tcp_sk(sk);
	skb_frag_t *frag;
	int i;

	record->end_seq = tp->write_seq + record->len;
	list_add_tail_rcu(&record->list, &offload_ctx->records_list);
	offload_ctx->open_record = NULL;

	if (test_bit(TLS_TX_SYNC_SCHED, &ctx->flags))
		tls_device_resync_tx(sk, ctx, tp->write_seq);

	tls_advance_record_sn(sk, prot, &ctx->tx);

	for (i = 0; i < record->num_frags; i++) {
		frag = &record->frags[i];
		sg_unmark_end(&offload_ctx->sg_tx_data[i]);
		sg_set_page(&offload_ctx->sg_tx_data[i], skb_frag_page(frag),
			    skb_frag_size(frag), skb_frag_off(frag));
		sk_mem_charge(sk, skb_frag_size(frag));
		get_page(skb_frag_page(frag));
	}
	sg_mark_end(&offload_ctx->sg_tx_data[record->num_frags - 1]);

	/* all ready, send */
	return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags);
}

static int tls_device_record_close(struct sock *sk,
				   struct tls_context *ctx,
				   struct tls_record_info *record,
				   struct page_frag *pfrag,
				   unsigned char record_type)
{
	struct tls_prot_info *prot = &ctx->prot_info;
	int ret;

	/* append tag
	 * device will fill in the tag, we just need to append a placeholder
	 * use socket memory to improve coalescing (a separate tag buffer
	 * could not be coalesced and would increase the frag count)
	 * if we can't allocate memory now, steal some back from data
	 */
	if (likely(skb_page_frag_refill(prot->tag_size, pfrag,
					sk->sk_allocation))) {
		ret = 0;
		tls_append_frag(record, pfrag, prot->tag_size);
	} else {
		ret = prot->tag_size;
		if (record->len <= prot->overhead_size)
			return -ENOMEM;
	}

	/* fill prepend */
	tls_fill_prepend(ctx, skb_frag_address(&record->frags[0]),
			 record->len - prot->overhead_size,
			 record_type, prot->version);
	return ret;
}

static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx,
				 struct page_frag *pfrag,
				 size_t prepend_size)
{
	struct tls_record_info *record;
	skb_frag_t *frag;

	record = kmalloc(sizeof(*record), GFP_KERNEL);
	if (!record)
		return -ENOMEM;

	frag = &record->frags[0];
	__skb_frag_set_page(frag, pfrag->page);
	skb_frag_off_set(frag, pfrag->offset);
	skb_frag_size_set(frag, prepend_size);

	get_page(pfrag->page);
	pfrag->offset += prepend_size;

	record->num_frags = 1;
	record->len = prepend_size;
	offload_ctx->open_record = record;
	return 0;
}

static int tls_do_allocation(struct sock *sk,
			     struct tls_offload_context_tx *offload_ctx,
			     struct page_frag *pfrag,
			     size_t prepend_size)
{
	int ret;

	if (!offload_ctx->open_record) {
		if (unlikely(!skb_page_frag_refill(prepend_size, pfrag,
						   sk->sk_allocation))) {
			READ_ONCE(sk->sk_prot)->enter_memory_pressure(sk);
			sk_stream_moderate_sndbuf(sk);
			return -ENOMEM;
		}

		ret = tls_create_new_record(offload_ctx, pfrag, prepend_size);
		if (ret)
			return ret;

		if (pfrag->size > pfrag->offset)
			return 0;
	}

	if (!sk_page_frag_refill(sk, pfrag))
		return -ENOMEM;

	return 0;
}

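/* Copy user data with non-temporal stores where possible: the payload
 * is consumed by the NIC via DMA rather than read back by the CPU, so
 * bypassing the cache avoids polluting it. An unaligned head is copied
 * normally up to a cache-line boundary, whole cache lines are streamed
 * with copy_from_iter_nocache(), and the tail is copied normally.
 * E.g. with 64-byte cache lines and an addr ending in 0x38, pre_copy
 * works out to 8 bytes.
 */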
static int tls_device_copy_data(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t pre_copy, nocache;

	pre_copy = ~((unsigned long)addr - 1) & (SMP_CACHE_BYTES - 1);
	if (pre_copy) {
		pre_copy = min(pre_copy, bytes);
		if (copy_from_iter(addr, pre_copy, i) != pre_copy)
			return -EFAULT;
		bytes -= pre_copy;
		addr += pre_copy;
	}

	nocache = round_down(bytes, SMP_CACHE_BYTES);
	if (copy_from_iter_nocache(addr, nocache, i) != nocache)
		return -EFAULT;
	bytes -= nocache;
	addr += nocache;

	if (bytes && copy_from_iter(addr, bytes, i) != bytes)
		return -EFAULT;

	return 0;
}

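/* Main TX path, shared by sendmsg, sendpage and the push-pending path:
 * loop filling the open record from @msg_iter through the socket's
 * page_frag, closing and pushing the record whenever it reaches
 * max_open_record_len, runs out of frag slots, or the caller's data is
 * exhausted without MSG_MORE/MSG_SENDPAGE_NOTLAST. Returns the number
 * of bytes consumed, or a negative error if nothing was consumed.
 */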
static int tls_push_data(struct sock *sk,
			 struct iov_iter *msg_iter,
			 size_t size, int flags,
			 unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
	struct tls_record_info *record = ctx->open_record;
	int tls_push_record_flags;
	struct page_frag *pfrag;
	size_t orig_size = size;
	u32 max_open_record_len;
	bool more = false;
	bool done = false;
	int copy, rc = 0;
	long timeo;

	if (flags &
	    ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_SENDPAGE_NOTLAST))
		return -EOPNOTSUPP;

	if (unlikely(sk->sk_err))
		return -sk->sk_err;

	flags |= MSG_SENDPAGE_DECRYPTED;
	tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;

	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	if (tls_is_partially_sent_record(tls_ctx)) {
		rc = tls_push_partial_record(sk, tls_ctx, flags);
		if (rc < 0)
			return rc;
	}

	pfrag = sk_page_frag(sk);

	/* TLS_HEADER_SIZE is not counted as part of the TLS record, and
	 * we need to leave room for an authentication tag.
	 */
	max_open_record_len = TLS_MAX_PAYLOAD_SIZE +
			      prot->prepend_size;
	do {
		rc = tls_do_allocation(sk, ctx, pfrag, prot->prepend_size);
		if (unlikely(rc)) {
			rc = sk_stream_wait_memory(sk, &timeo);
			if (!rc)
				continue;

			record = ctx->open_record;
			if (!record)
				break;
handle_error:
			if (record_type != TLS_RECORD_TYPE_DATA) {
				/* avoid sending partial
				 * record with type !=
				 * application_data
				 */
				size = orig_size;
				destroy_record(record);
				ctx->open_record = NULL;
			} else if (record->len > prot->prepend_size) {
				goto last_record;
			}

			break;
		}

		record = ctx->open_record;
		copy = min_t(size_t, size, (pfrag->size - pfrag->offset));
		copy = min_t(size_t, copy, (max_open_record_len - record->len));

		rc = tls_device_copy_data(page_address(pfrag->page) +
					  pfrag->offset, copy, msg_iter);
		if (rc)
			goto handle_error;
		tls_append_frag(record, pfrag, copy);

		size -= copy;
		if (!size) {
last_record:
			tls_push_record_flags = flags;
			if (flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE)) {
				more = true;
				break;
			}

			done = true;
		}

		if (done || record->len >= max_open_record_len ||
		    (record->num_frags >= MAX_SKB_FRAGS - 1)) {
			rc = tls_device_record_close(sk, tls_ctx, record,
						     pfrag, record_type);
			if (rc) {
				if (rc > 0) {
					size += rc;
				} else {
					size = orig_size;
					destroy_record(record);
					ctx->open_record = NULL;
					break;
				}
			}

			rc = tls_push_record(sk,
					     tls_ctx,
					     ctx,
					     record,
					     tls_push_record_flags);
			if (rc < 0)
				break;
		}
	} while (!done);

	tls_ctx->pending_open_record_frags = more;

	if (orig_size - size > 0)
		rc = orig_size - size;

	return rc;
}

int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	int rc;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);

	if (unlikely(msg->msg_controllen)) {
		rc = tls_proccess_cmsg(sk, msg, &record_type);
		if (rc)
			goto out;
	}

	rc = tls_push_data(sk, &msg->msg_iter, size,
			   msg->msg_flags, record_type);

out:
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return rc;
}

int tls_device_sendpage(struct sock *sk, struct page *page,
			int offset, size_t size, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct iov_iter msg_iter;
	char *kaddr;
	struct kvec iov;
	int rc;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);

	if (flags & MSG_OOB) {
		rc = -EOPNOTSUPP;
		goto out;
	}

	kaddr = kmap(page);
	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	iov_iter_kvec(&msg_iter, WRITE, &iov, 1, size);
	rc = tls_push_data(sk, &msg_iter, size,
			   flags, TLS_RECORD_TYPE_DATA);
	kunmap(page);

out:
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return rc;
}

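/* Exported for drivers on the TX datapath: find the record containing
 * TCP sequence number @seq (typically on retransmission, when the
 * device needs the original record data to re-encrypt). Starts from
 * retransmit_hint when it is still relevant, otherwise walks
 * records_list from the start; the record sequence number is returned
 * through @p_record_sn.
 */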
struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
				       u32 seq, u64 *p_record_sn)
{
	u64 record_sn = context->hint_record_sn;
	struct tls_record_info *info, *last;

	info = context->retransmit_hint;
	if (!info ||
	    before(seq, info->end_seq - info->len)) {
		/* if retransmit_hint is irrelevant start
		 * from the beginning of the list
		 */
		info = list_first_entry_or_null(&context->records_list,
						struct tls_record_info, list);
		if (!info)
			return NULL;
		/* send the start_marker record if seq number is before the
		 * tls offload start marker sequence number. This record is
		 * required to handle TCP packets which are before TLS offload
		 * started.
		 * And if it's not the start marker, check whether this seq
		 * number belongs to the list.
		 */
		if (likely(!tls_record_is_start_marker(info))) {
			/* we have the first record, get the last record to see
			 * if this seq number belongs to the list.
			 */
			last = list_last_entry(&context->records_list,
					       struct tls_record_info, list);

			if (!between(seq, tls_record_start_seq(info),
				     last->end_seq))
				return NULL;
		}
		record_sn = context->unacked_record_sn;
	}

	/* We just need the _rcu for the READ_ONCE() */
	rcu_read_lock();
	list_for_each_entry_from_rcu(info, &context->records_list, list) {
		if (before(seq, info->end_seq)) {
			if (!context->retransmit_hint ||
			    after(info->end_seq,
				  context->retransmit_hint->end_seq)) {
				context->hint_record_sn = record_sn;
				context->retransmit_hint = info;
			}
			*p_record_sn = record_sn;
			goto exit_rcu_unlock;
		}
		record_sn++;
	}
	info = NULL;

exit_rcu_unlock:
	rcu_read_unlock();
	return info;
}
EXPORT_SYMBOL(tls_get_record);

static int tls_device_push_pending_record(struct sock *sk, int flags)
{
	struct iov_iter msg_iter;

	iov_iter_kvec(&msg_iter, WRITE, NULL, 0, 0);
	return tls_push_data(sk, &msg_iter, 0, flags, TLS_RECORD_TYPE_DATA);
}

void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
{
	if (tls_is_partially_sent_record(ctx)) {
		gfp_t sk_allocation = sk->sk_allocation;

		WARN_ON_ONCE(sk->sk_write_pending);

		sk->sk_allocation = GFP_ATOMIC;
		tls_push_partial_record(sk, ctx,
					MSG_DONTWAIT | MSG_NOSIGNAL |
					MSG_SENDPAGE_DECRYPTED);
		sk->sk_allocation = sk_allocation;
	}
}

static void tls_device_resync_rx(struct tls_context *tls_ctx,
				 struct sock *sk, u32 seq, u8 *rcd_sn)
{
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
	struct net_device *netdev;

	trace_tls_device_rx_resync_send(sk, seq, rcd_sn, rx_ctx->resync_type);
	rcu_read_lock();
	netdev = READ_ONCE(tls_ctx->netdev);
	if (netdev)
		netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
						   TLS_OFFLOAD_CTX_DIR_RX);
	rcu_read_unlock();
	TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICERESYNC);
}

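/* Async RX resync (TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC). The
 * driver's 64-bit request word packs the requested TCP seq in the
 * upper 32 bits, a range length in bits 16..31, and the
 * RESYNC_REQ_ASYNC flag in the low bits. While the async flag is set
 * we only log record header sequence numbers in the requested range
 * (counting skipped records in rcd_delta); once the driver clears the
 * flag, the logged entry matching req_seq tells us how far the record
 * sequence number must be wound back.
 */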
static bool
tls_device_rx_resync_async(struct tls_offload_resync_async *resync_async,
			   s64 resync_req, u32 *seq, u16 *rcd_delta)
{
	u32 is_async = resync_req & RESYNC_REQ_ASYNC;
	u32 req_seq = resync_req >> 32;
	u32 req_end = req_seq + ((resync_req >> 16) & 0xffff);
	u16 i;

	*rcd_delta = 0;

	if (is_async) {
		/* shouldn't get to wraparound:
		 * too long in async stage, something bad happened
		 */
		if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX))
			return false;

		/* asynchronous stage: log all headers seq such that
		 * req_seq <= seq <= end_seq, and wait for real resync request
		 */
		if (before(*seq, req_seq))
			return false;
		if (!after(*seq, req_end) &&
		    resync_async->loglen < TLS_DEVICE_RESYNC_ASYNC_LOGMAX)
			resync_async->log[resync_async->loglen++] = *seq;

		resync_async->rcd_delta++;

		return false;
	}

	/* synchronous stage: check against the logged entries and
	 * proceed to check the next entries if no match was found
	 */
	for (i = 0; i < resync_async->loglen; i++)
		if (req_seq == resync_async->log[i] &&
		    atomic64_try_cmpxchg(&resync_async->req, &resync_req, 0)) {
			*rcd_delta = resync_async->rcd_delta - i;
			*seq = req_seq;
			resync_async->loglen = 0;
			resync_async->rcd_delta = 0;
			return true;
		}

	resync_async->loglen = 0;
	resync_async->rcd_delta = 0;

	if (req_seq == *seq &&
	    atomic64_try_cmpxchg(&resync_async->req,
				 &resync_req, 0))
		return true;

	return false;
}

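/* Called as records are parsed from the TCP stream; which of the three
 * strategies below runs depends on rx_ctx->resync_type: DRIVER_REQ
 * compares the driver-requested seq against this record's header,
 * CORE_NEXT_HINT resyncs on the next record boundary once the stream
 * parser has scheduled it, and DRIVER_REQ_ASYNC goes through the
 * logging scheme in tls_device_rx_resync_async() above.
 */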
void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx;
	u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
	u32 sock_data, is_req_pending;
	struct tls_prot_info *prot;
	s64 resync_req;
	u16 rcd_delta;
	u32 req_seq;

	if (tls_ctx->rx_conf != TLS_HW)
		return;
	if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags)))
		return;

	prot = &tls_ctx->prot_info;
	rx_ctx = tls_offload_ctx_rx(tls_ctx);
	memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);

	switch (rx_ctx->resync_type) {
	case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ:
		resync_req = atomic64_read(&rx_ctx->resync_req);
		req_seq = resync_req >> 32;
		seq += TLS_HEADER_SIZE - 1;
		is_req_pending = resync_req;

		if (likely(!is_req_pending) || req_seq != seq ||
		    !atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
			return;
		break;
	case TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT:
		if (likely(!rx_ctx->resync_nh_do_now))
			return;

		/* head of next rec is already in, note that the sock_inq will
		 * include the currently parsed message when called from parser
		 */
		sock_data = tcp_inq(sk);
		if (sock_data > rcd_len) {
			trace_tls_device_rx_resync_nh_delay(sk, sock_data,
							    rcd_len);
			return;
		}

		rx_ctx->resync_nh_do_now = 0;
		seq += rcd_len;
		tls_bigint_increment(rcd_sn, prot->rec_seq_size);
		break;
	case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC:
		resync_req = atomic64_read(&rx_ctx->resync_async->req);
		is_req_pending = resync_req;
		if (likely(!is_req_pending))
			return;

		if (!tls_device_rx_resync_async(rx_ctx->resync_async,
						resync_req, &seq, &rcd_delta))
			return;
		tls_bigint_subtract(rcd_sn, rcd_delta);
		break;
	}

	tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
}


static void tls_device_core_ctrl_rx_resync(struct tls_context *tls_ctx,
					   struct tls_offload_context_rx *ctx,
					   struct sock *sk, struct sk_buff *skb)
{
	struct strp_msg *rxm;

	/* device will request resyncs by itself based on stream scan */
	if (ctx->resync_type != TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT)
		return;
	/* already scheduled */
	if (ctx->resync_nh_do_now)
		return;
	/* seen decrypted fragments since last fully-failed record */
	if (ctx->resync_nh_reset) {
		ctx->resync_nh_reset = 0;
		ctx->resync_nh.decrypted_failed = 1;
		ctx->resync_nh.decrypted_tgt = TLS_DEVICE_RESYNC_NH_START_IVAL;
		return;
	}

	if (++ctx->resync_nh.decrypted_failed <= ctx->resync_nh.decrypted_tgt)
		return;

	/* doing resync, bump the next target in case it fails */
	if (ctx->resync_nh.decrypted_tgt < TLS_DEVICE_RESYNC_NH_MAX_IVAL)
		ctx->resync_nh.decrypted_tgt *= 2;
	else
		ctx->resync_nh.decrypted_tgt += TLS_DEVICE_RESYNC_NH_MAX_IVAL;

	rxm = strp_msg(skb);

	/* head of next rec is already in, parser will sync for us */
	if (tcp_inq(sk) > rxm->full_len) {
		trace_tls_device_rx_resync_nh_schedule(sk);
		ctx->resync_nh_do_now = 1;
	} else {
		struct tls_prot_info *prot = &tls_ctx->prot_info;
		u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];

		memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);
		tls_bigint_increment(rcd_sn, prot->rec_seq_size);

		tls_device_resync_rx(tls_ctx, sk, tcp_sk(sk)->copied_seq,
				     rcd_sn);
	}
}
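
/* Illustrative note: assuming the upstream interval constants
 * (TLS_DEVICE_RESYNC_NH_START_IVAL == 2 and
 * TLS_DEVICE_RESYNC_NH_MAX_IVAL == 128 at the time of writing), the
 * failed-record target above grows as 2, 4, 8, ..., 128 and then
 * linearly as 256, 384, ..., so resync attempts on a persistently
 * desynchronized stream back off instead of firing on every record.
 */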

static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
{
	struct strp_msg *rxm = strp_msg(skb);
	int err = 0, offset = rxm->offset, copy, nsg, data_len, pos;
	struct sk_buff *skb_iter, *unused;
	struct scatterlist sg[1];
	char *orig_buf, *buf;

	orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE +
			   TLS_CIPHER_AES_GCM_128_IV_SIZE, sk->sk_allocation);
	if (!orig_buf)
		return -ENOMEM;
	buf = orig_buf;

	nsg = skb_cow_data(skb, 0, &unused);
	if (unlikely(nsg < 0)) {
		err = nsg;
		goto free_buf;
	}

	sg_init_table(sg, 1);
	sg_set_buf(&sg[0], buf,
		   rxm->full_len + TLS_HEADER_SIZE +
		   TLS_CIPHER_AES_GCM_128_IV_SIZE);
	err = skb_copy_bits(skb, offset, buf,
			    TLS_HEADER_SIZE + TLS_CIPHER_AES_GCM_128_IV_SIZE);
	if (err)
		goto free_buf;

	/* We are interested only in the decrypted data, not the auth tag */
	err = decrypt_skb(sk, skb, sg);
	if (err != -EBADMSG)
		goto free_buf;
	else
		err = 0;

	data_len = rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE;

	if (skb_pagelen(skb) > offset) {
		copy = min_t(int, skb_pagelen(skb) - offset, data_len);

		if (skb->decrypted) {
			err = skb_store_bits(skb, offset, buf, copy);
			if (err)
				goto free_buf;
		}

		offset += copy;
		buf += copy;
	}

	pos = skb_pagelen(skb);
	skb_walk_frags(skb, skb_iter) {
		int frag_pos;

		/* Practically all frags must belong to msg if reencrypt
		 * is needed with current strparser and coalescing logic,
		 * but strparser may "get optimized", so let's be safe.
		 */
		if (pos + skb_iter->len <= offset)
			goto done_with_frag;
		if (pos >= data_len + rxm->offset)
			break;

		frag_pos = offset - pos;
		copy = min_t(int, skb_iter->len - frag_pos,
			     data_len + rxm->offset - offset);

		if (skb_iter->decrypted) {
			err = skb_store_bits(skb_iter, frag_pos, buf, copy);
			if (err)
				goto free_buf;
		}

		offset += copy;
		buf += copy;
done_with_frag:
		pos += skb_iter->len;
	}

free_buf:
	kfree(orig_buf);
	return err;
}
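
/* Illustrative note on the function above: AES-GCM is a counter-mode
 * cipher, so running decrypt_skb() over a record that is part
 * plaintext, part ciphertext XORs the keystream over both. Regions the
 * NIC already decrypted therefore come out of the pass *re-encrypted*
 * in the bounce buffer, and copying those bytes back over the
 * fragments marked ->decrypted leaves the whole record as uniform
 * ciphertext that the software fallback can decrypt normally. The
 * bounce buffer mirrors the on-wire record layout:
 *
 *	TLS header (5B) | explicit IV (8B) | payload | auth tag (16B)
 */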

int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
			 struct sk_buff *skb, struct strp_msg *rxm)
{
	struct tls_offload_context_rx *ctx = tls_offload_ctx_rx(tls_ctx);
	int is_decrypted = skb->decrypted;
	int is_encrypted = !is_decrypted;
	struct sk_buff *skb_iter;

	/* Check if all the data is decrypted already */
	skb_walk_frags(skb, skb_iter) {
		is_decrypted &= skb_iter->decrypted;
		is_encrypted &= !skb_iter->decrypted;
	}

	trace_tls_device_decrypted(sk, tcp_sk(sk)->copied_seq - rxm->full_len,
				   tls_ctx->rx.rec_seq, rxm->full_len,
				   is_encrypted, is_decrypted);

	ctx->sw.decrypted |= is_decrypted;

	if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags))) {
		if (likely(is_encrypted || is_decrypted))
			return 0;

		/* After tls_device_down disables the offload, the next SKB will
		 * likely have initial fragments decrypted, and final ones not
		 * decrypted. We need to reencrypt that single SKB.
		 */
		return tls_device_reencrypt(sk, skb);
	}

	/* Return immediately if the record is either entirely plaintext or
	 * entirely ciphertext. Otherwise reencrypt the partially decrypted
	 * record.
	 */
	if (is_decrypted) {
		ctx->resync_nh_reset = 1;
		return 0;
	}
	if (is_encrypted) {
		tls_device_core_ctrl_rx_resync(tls_ctx, ctx, sk, skb);
		return 0;
	}

	ctx->resync_nh_reset = 1;
	return tls_device_reencrypt(sk, skb);
}
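
/* Illustrative sketch (hypothetical driver code, not part of this
 * file): the ->decrypted checks above rely on the NIC rx completion
 * path marking each skb it decrypted inline, e.g.:
 *
 *	static void mydrv_rx_complete(struct mydrv_cqe *cqe,
 *				      struct sk_buff *skb)
 *	{
 *		if (cqe_tls_decrypted(cqe))	// hypothetical HW status check
 *			skb->decrypted = 1;	// real field under CONFIG_TLS_DEVICE
 *	}
 *
 * The core GRO path avoids merging skbs whose ->decrypted values
 * differ, which is why a mixed record shows up here as one skb whose
 * frag list has differing per-fragment markings.
 */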

static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
			      struct net_device *netdev)
{
	if (sk->sk_destruct != tls_device_sk_destruct) {
		refcount_set(&ctx->refcount, 1);
		dev_hold(netdev);
		ctx->netdev = netdev;
		spin_lock_irq(&tls_device_lock);
		list_add_tail(&ctx->list, &tls_device_list);
		spin_unlock_irq(&tls_device_lock);

		ctx->sk_destruct = sk->sk_destruct;
		smp_store_release(&sk->sk_destruct, tls_device_sk_destruct);
	}
}

int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
{
	u16 nonce_size, tag_size, iv_size, rec_seq_size;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_record_info *start_marker_record;
	struct tls_offload_context_tx *offload_ctx;
	struct tls_crypto_info *crypto_info;
	struct net_device *netdev;
	char *iv, *rec_seq;
	struct sk_buff *skb;
	__be64 rcd_sn;
	int rc;

	if (!ctx)
		return -EINVAL;

	if (ctx->priv_ctx_tx)
		return -EEXIST;

	start_marker_record = kmalloc(sizeof(*start_marker_record), GFP_KERNEL);
	if (!start_marker_record)
		return -ENOMEM;

	offload_ctx = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_TX, GFP_KERNEL);
	if (!offload_ctx) {
		rc = -ENOMEM;
		goto free_marker_record;
	}

	crypto_info = &ctx->crypto_send.info;
	if (crypto_info->version != TLS_1_2_VERSION) {
		rc = -EOPNOTSUPP;
		goto free_offload_ctx;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
		break;
	default:
		rc = -EINVAL;
		goto free_offload_ctx;
	}

	/* Sanity-check the rec_seq_size for stack allocations */
	if (rec_seq_size > TLS_MAX_REC_SEQ_SIZE) {
		rc = -EINVAL;
		goto free_offload_ctx;
	}

	prot->version = crypto_info->version;
	prot->cipher_type = crypto_info->cipher_type;
	prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
	prot->tag_size = tag_size;
	prot->overhead_size = prot->prepend_size + prot->tag_size;
	prot->iv_size = iv_size;
	ctx->tx.iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			     GFP_KERNEL);
	if (!ctx->tx.iv) {
		rc = -ENOMEM;
		goto free_offload_ctx;
	}

	memcpy(ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);

	prot->rec_seq_size = rec_seq_size;
	ctx->tx.rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
	if (!ctx->tx.rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}

	rc = tls_sw_fallback_init(sk, offload_ctx, crypto_info);
	if (rc)
		goto free_rec_seq;

	/* start at rec_seq - 1 to account for the start marker record */
	memcpy(&rcd_sn, ctx->tx.rec_seq, sizeof(rcd_sn));
	offload_ctx->unacked_record_sn = be64_to_cpu(rcd_sn) - 1;

	start_marker_record->end_seq = tcp_sk(sk)->write_seq;
	start_marker_record->len = 0;
	start_marker_record->num_frags = 0;

	INIT_LIST_HEAD(&offload_ctx->records_list);
	list_add_tail(&start_marker_record->list, &offload_ctx->records_list);
	spin_lock_init(&offload_ctx->lock);
	sg_init_table(offload_ctx->sg_tx_data,
		      ARRAY_SIZE(offload_ctx->sg_tx_data));

	clean_acked_data_enable(inet_csk(sk), &tls_icsk_clean_acked);
	ctx->push_pending_record = tls_device_push_pending_record;

	/* TLS offload is greatly simplified if we don't send
	 * SKBs where only part of the payload needs to be encrypted.
	 * So mark the last skb in the write queue as end of record.
	 */
	skb = tcp_write_queue_tail(sk);
	if (skb)
		TCP_SKB_CB(skb)->eor = 1;

	netdev = get_netdev_for_sock(sk);
	if (!netdev) {
		pr_err_ratelimited("%s: netdev not found\n", __func__);
		rc = -EINVAL;
		goto disable_cad;
	}

	if (!(netdev->features & NETIF_F_HW_TLS_TX)) {
		rc = -EOPNOTSUPP;
		goto release_netdev;
	}

	/* Avoid offloading if the device is down.
	 * We don't want to offload new flows after the NETDEV_DOWN event.
	 *
	 * device_offload_lock is taken in tls_device's NETDEV_DOWN handler,
	 * thus protecting from the device going down before ctx was added
	 * to tls_device_list.
	 */
	down_read(&device_offload_lock);
	if (!(netdev->flags & IFF_UP)) {
		rc = -EINVAL;
		goto release_lock;
	}

	ctx->priv_ctx_tx = offload_ctx;
	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX,
					     &ctx->crypto_send.info,
					     tcp_sk(sk)->write_seq);
	trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_TX,
				     tcp_sk(sk)->write_seq, rec_seq, rc);
	if (rc)
		goto release_lock;

	tls_device_attach(ctx, sk, netdev);
	up_read(&device_offload_lock);

	/* Following this assignment tls_is_sk_tx_device_offloaded
	 * will return true and the context might be accessed
	 * by the netdev's xmit function.
	 */
	smp_store_release(&sk->sk_validate_xmit_skb, tls_validate_xmit_skb);
	dev_put(netdev);

	return 0;

release_lock:
	up_read(&device_offload_lock);
release_netdev:
	dev_put(netdev);
disable_cad:
	clean_acked_data_disable(inet_csk(sk));
	crypto_free_aead(offload_ctx->aead_send);
free_rec_seq:
	kfree(ctx->tx.rec_seq);
free_iv:
	kfree(ctx->tx.iv);
free_offload_ctx:
	kfree(offload_ctx);
	ctx->priv_ctx_tx = NULL;
free_marker_record:
	kfree(start_marker_record);
	return rc;
}
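
/* Illustrative userspace sketch (not part of this file): device offload
 * is selected transparently by the regular kTLS setup whenever the
 * route's netdev advertises NETIF_F_HW_TLS_TX; there is no separate
 * uAPI. The key material below would come from a real handshake, and
 * some libcs need SOL_TLS (282) defined manually:
 *
 *	#include <linux/tls.h>
 *	#include <netinet/tcp.h>
 *	#include <sys/socket.h>
 *
 *	struct tls12_crypto_info_aes_gcm_128 ci = {
 *		.info.version = TLS_1_2_VERSION,
 *		.info.cipher_type = TLS_CIPHER_AES_GCM_128,
 *		// .iv, .key, .salt, .rec_seq filled from the handshake
 *	};
 *
 *	setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 *	setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci)); // reaches this file
 *
 * At the time of writing, tls_main falls back to the software kTLS path
 * if tls_set_device_offload() fails, so the same call works either way.
 */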

int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
{
	struct tls12_crypto_info_aes_gcm_128 *info;
	struct tls_offload_context_rx *context;
	struct net_device *netdev;
	int rc = 0;

	if (ctx->crypto_recv.info.version != TLS_1_2_VERSION)
		return -EOPNOTSUPP;

	netdev = get_netdev_for_sock(sk);
	if (!netdev) {
		pr_err_ratelimited("%s: netdev not found\n", __func__);
		return -EINVAL;
	}

	if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
		rc = -EOPNOTSUPP;
		goto release_netdev;
	}

	/* Avoid offloading if the device is down.
	 * We don't want to offload new flows after the NETDEV_DOWN event.
	 *
	 * device_offload_lock is taken in tls_device's NETDEV_DOWN handler,
	 * thus protecting from the device going down before ctx was added
	 * to tls_device_list.
	 */
	down_read(&device_offload_lock);
	if (!(netdev->flags & IFF_UP)) {
		rc = -EINVAL;
		goto release_lock;
	}

	context = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_RX, GFP_KERNEL);
	if (!context) {
		rc = -ENOMEM;
		goto release_lock;
	}
	context->resync_nh_reset = 1;

	ctx->priv_ctx_rx = context;
	rc = tls_set_sw_offload(sk, ctx, 0);
	if (rc)
		goto release_ctx;

	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX,
					     &ctx->crypto_recv.info,
					     tcp_sk(sk)->copied_seq);
	info = (void *)&ctx->crypto_recv.info;
	trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_RX,
				     tcp_sk(sk)->copied_seq, info->rec_seq, rc);
	if (rc)
		goto free_sw_resources;

	tls_device_attach(ctx, sk, netdev);
	up_read(&device_offload_lock);

	dev_put(netdev);

	return 0;

free_sw_resources:
	up_read(&device_offload_lock);
	tls_sw_free_resources_rx(sk);
	down_read(&device_offload_lock);
release_ctx:
	ctx->priv_ctx_rx = NULL;
release_lock:
	up_read(&device_offload_lock);
release_netdev:
	dev_put(netdev);
	return rc;
}
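
/* Illustrative sketch (hypothetical driver code): inside its
 * ->tls_dev_add() RX handler a driver picks which of the resync
 * strategies handled earlier in this file it wants. Devices that
 * cannot scan the TCP stream for record headers themselves can opt
 * into the core-driven hint, e.g.:
 *
 *	static int mydrv_tls_dev_add(struct net_device *dev, struct sock *sk,
 *				     enum tls_offload_ctx_dir dir,
 *				     struct tls_crypto_info *info,
 *				     u32 start_offload_tcp_sn)
 *	{
 *		if (dir == TLS_OFFLOAD_CTX_DIR_RX)
 *			tls_offload_rx_resync_set_type(sk,
 *				TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT);
 *		return 0;	// program keys into HW here
 *	}
 *
 * tls_offload_rx_resync_set_type() matches include/net/tls.h at the
 * time of writing; the mydrv_* names are placeholders.
 */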

void tls_device_offload_cleanup_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct net_device *netdev;

	down_read(&device_offload_lock);
	netdev = tls_ctx->netdev;
	if (!netdev)
		goto out;

	netdev->tlsdev_ops->tls_dev_del(netdev, tls_ctx,
					TLS_OFFLOAD_CTX_DIR_RX);

	if (tls_ctx->tx_conf != TLS_HW) {
		dev_put(netdev);
		tls_ctx->netdev = NULL;
	} else {
		set_bit(TLS_RX_DEV_CLOSED, &tls_ctx->flags);
	}
out:
	up_read(&device_offload_lock);
	tls_sw_release_resources_rx(sk);
}

static int tls_device_down(struct net_device *netdev)
{
	struct tls_context *ctx, *tmp;
	unsigned long flags;
	LIST_HEAD(list);

	/* Request a write lock to block new offload attempts */
	down_write(&device_offload_lock);

	spin_lock_irqsave(&tls_device_lock, flags);
	list_for_each_entry_safe(ctx, tmp, &tls_device_list, list) {
		if (ctx->netdev != netdev ||
		    !refcount_inc_not_zero(&ctx->refcount))
			continue;

		list_move(&ctx->list, &list);
	}
	spin_unlock_irqrestore(&tls_device_lock, flags);

	list_for_each_entry_safe(ctx, tmp, &list, list) {
		/* Stop offloaded TX and switch to the fallback.
		 * tls_is_sk_tx_device_offloaded will return false.
		 */
		WRITE_ONCE(ctx->sk->sk_validate_xmit_skb, tls_validate_xmit_skb_sw);

		/* Stop the RX and TX resync.
		 * tls_dev_resync must not be called after tls_dev_del.
		 */
		WRITE_ONCE(ctx->netdev, NULL);

		/* Start skipping the RX resync logic completely. */
		set_bit(TLS_RX_DEV_DEGRADED, &ctx->flags);

		/* Sync with inflight packets. After this point:
		 * TX: no non-encrypted packets will be passed to the driver.
		 * RX: resync requests from the driver will be ignored.
		 */
		synchronize_net();

		/* Release the offload context on the driver side. */
		if (ctx->tx_conf == TLS_HW)
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_TX);
		if (ctx->rx_conf == TLS_HW &&
		    !test_bit(TLS_RX_DEV_CLOSED, &ctx->flags))
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_RX);

		dev_put(netdev);

		/* Move the context to a separate list for two reasons:
		 * 1. When the context is deallocated, list_del is called.
		 * 2. It's no longer an offloaded context, so we don't want to
		 *    run offload-specific code on this context.
		 */
		spin_lock_irqsave(&tls_device_lock, flags);
		list_move_tail(&ctx->list, &tls_device_down_list);
		spin_unlock_irqrestore(&tls_device_lock, flags);

		/* Device contexts for RX and TX will be freed on sk_destruct
		 * by tls_device_free_ctx. rx_conf and tx_conf stay in TLS_HW.
		 */
	}

	up_write(&device_offload_lock);

	flush_work(&tls_device_gc_work);

	return NOTIFY_DONE;
}

static int tls_dev_event(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (!dev->tlsdev_ops &&
	    !(dev->features & (NETIF_F_HW_TLS_RX | NETIF_F_HW_TLS_TX)))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_FEAT_CHANGE:
		if ((dev->features & NETIF_F_HW_TLS_RX) &&
		    !dev->tlsdev_ops->tls_dev_resync)
			return NOTIFY_BAD;

		if (dev->tlsdev_ops &&
		    dev->tlsdev_ops->tls_dev_add &&
		    dev->tlsdev_ops->tls_dev_del)
			return NOTIFY_DONE;
		else
			return NOTIFY_BAD;
	case NETDEV_DOWN:
		return tls_device_down(dev);
	}
	return NOTIFY_DONE;
}
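
/* Illustrative sketch (hypothetical driver code): the notifier above
 * rejects registration unless a TLS-capable netdev wires up the full
 * tlsdev_ops contract, i.e. something like:
 *
 *	static const struct tlsdev_ops mydrv_tlsdev_ops = {
 *		.tls_dev_add	= mydrv_tls_dev_add,
 *		.tls_dev_del	= mydrv_tls_dev_del,
 *		.tls_dev_resync	= mydrv_tls_dev_resync,	// required for RX
 *	};
 *
 *	dev->tlsdev_ops	 = &mydrv_tlsdev_ops;
 *	dev->features	|= NETIF_F_HW_TLS_TX | NETIF_F_HW_TLS_RX;
 *
 * assigned before register_netdev(); the mydrv_* names are placeholders.
 */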

static struct notifier_block tls_dev_notifier = {
	.notifier_call = tls_dev_event,
};

void __init tls_device_init(void)
{
	register_netdevice_notifier(&tls_dev_notifier);
}

void __exit tls_device_cleanup(void)
{
	unregister_netdevice_notifier(&tls_dev_notifier);
	flush_work(&tls_device_gc_work);
	clean_acked_data_flush();
}