// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

/*
 * Refer to Documentation/block/inline-encryption.rst for detailed explanation.
 */

#define pr_fmt(fmt) "blk-crypto: " fmt

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/keyslot-manager.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "blk-crypto-internal.h"

const struct blk_crypto_mode blk_crypto_modes[] = {
	[BLK_ENCRYPTION_MODE_AES_256_XTS] = {
		.cipher_str = "xts(aes)",
		.keysize = 64,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV] = {
		.cipher_str = "essiv(cbc(aes),sha256)",
		.keysize = 16,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_ADIANTUM] = {
		.cipher_str = "adiantum(xchacha12,aes)",
		.keysize = 32,
		.ivsize = 32,
	},
};

/*
 * This number needs to be at least (the number of threads doing IO
 * concurrently) * (maximum recursive depth of a bio), so that we don't
 * deadlock on crypt_ctx allocations. The default is chosen to be the same
 * as the default number of post read contexts in both EXT4 and F2FS.
 */
static int num_prealloc_crypt_ctxs = 128;

module_param(num_prealloc_crypt_ctxs, int, 0444);
MODULE_PARM_DESC(num_prealloc_crypt_ctxs,
		 "Number of bio crypto contexts to preallocate");

static struct kmem_cache *bio_crypt_ctx_cache;
static mempool_t *bio_crypt_ctx_pool;

static int __init bio_crypt_ctx_init(void)
{
	size_t i;

	bio_crypt_ctx_cache = KMEM_CACHE(bio_crypt_ctx, 0);
	if (!bio_crypt_ctx_cache)
		goto out_no_mem;

	bio_crypt_ctx_pool = mempool_create_slab_pool(num_prealloc_crypt_ctxs,
						      bio_crypt_ctx_cache);
	if (!bio_crypt_ctx_pool)
		goto out_no_mem;

	/* This is assumed in various places. */
	BUILD_BUG_ON(BLK_ENCRYPTION_MODE_INVALID != 0);

	/* Sanity check that no algorithm exceeds the defined limits. */
	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++) {
		BUG_ON(blk_crypto_modes[i].keysize > BLK_CRYPTO_MAX_KEY_SIZE);
		BUG_ON(blk_crypto_modes[i].ivsize > BLK_CRYPTO_MAX_IV_SIZE);
	}

	return 0;
out_no_mem:
	panic("Failed to allocate mem for bio crypt ctxs\n");
}
subsys_initcall(bio_crypt_ctx_init);

void bio_crypt_set_ctx(struct bio *bio, const struct blk_crypto_key *key,
		       const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], gfp_t gfp_mask)
{
	struct bio_crypt_ctx *bc;

	/*
	 * The caller must use a gfp_mask that contains __GFP_DIRECT_RECLAIM so
	 * that the mempool_alloc() can't fail.
	 */
	WARN_ON_ONCE(!(gfp_mask & __GFP_DIRECT_RECLAIM));

	bc = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);

	bc->bc_key = key;
	memcpy(bc->bc_dun, dun, sizeof(bc->bc_dun));

	bio->bi_crypt_context = bc;
}
EXPORT_SYMBOL_GPL(bio_crypt_set_ctx);
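
/*
 * Example usage (a sketch, not taken from any particular caller): a
 * filesystem that has already initialized @key with blk_crypto_init_key()
 * and called blk_crypto_start_using_key() might attach an encryption
 * context to a bio like this, using the logical block number as the DUN:
 *
 *	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { lblk_num };
 *
 *	bio_crypt_set_ctx(bio, key, dun, GFP_NOIO);
 *
 * GFP_NOIO includes __GFP_DIRECT_RECLAIM, so the allocation above cannot
 * fail; "lblk_num" is a hypothetical variable owned by the caller.
 */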

void __bio_crypt_free_ctx(struct bio *bio)
{
	mempool_free(bio->bi_crypt_context, bio_crypt_ctx_pool);
	bio->bi_crypt_context = NULL;
}

int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)
{
	dst->bi_crypt_context = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
	if (!dst->bi_crypt_context)
		return -ENOMEM;
	*dst->bi_crypt_context = *src->bi_crypt_context;
	return 0;
}
EXPORT_SYMBOL_GPL(__bio_crypt_clone);

/* Increments @dun by @inc, treating @dun as a multi-limb integer. */
void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
			     unsigned int inc)
{
	int i;

	for (i = 0; inc && i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
		dun[i] += inc;
		/*
		 * If the addition in this limb overflowed, then we need to
		 * carry 1 into the next limb. Else the carry is 0.
		 */
		if (dun[i] < inc)
			inc = 1;
		else
			inc = 0;
	}
}
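
/*
 * A worked example of the carry handling above (illustrative only): with
 * BLK_CRYPTO_DUN_ARRAY_SIZE >= 2, incrementing dun = { U64_MAX, 0 } by 1
 * overflows limb 0, so the loop stores dun[0] = 0 and carries 1 into
 * limb 1, leaving dun = { 0, 1 }.
 */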

void __bio_crypt_advance(struct bio *bio, unsigned int bytes)
{
	struct bio_crypt_ctx *bc = bio->bi_crypt_context;

	bio_crypt_dun_increment(bc->bc_dun,
				bytes >> bc->bc_key->data_unit_size_bits);
}

/*
 * Returns true if @bc->bc_dun plus @bytes converted to data units is equal to
 * @next_dun, treating the DUNs as multi-limb integers.
 */
bool bio_crypt_dun_is_contiguous(const struct bio_crypt_ctx *bc,
				 unsigned int bytes,
				 const u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
	int i;
	unsigned int carry = bytes >> bc->bc_key->data_unit_size_bits;

	for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
		if (bc->bc_dun[i] + carry != next_dun[i])
			return false;
		/*
		 * If the addition in this limb overflowed, then we need to
		 * carry 1 into the next limb. Else the carry is 0.
		 */
		if ((bc->bc_dun[i] + carry) < carry)
			carry = 1;
		else
			carry = 0;
	}

	/* If the DUN wrapped through 0, don't treat it as contiguous. */
	return carry == 0;
}
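
/*
 * For example (illustrative numbers only): with 4096-byte data units
 * (data_unit_size_bits == 12), a bio whose context has bc_dun = { 5, 0 }
 * and which covers bytes == 12288 (three data units) is DUN-contiguous
 * with a following bio whose DUN is { 8, 0 }, but not with one whose DUN
 * is { 9, 0 }.
 */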

/*
 * Checks that two bio crypt contexts are compatible - i.e. that
 * they are mergeable except for data_unit_num continuity.
 */
static bool bio_crypt_ctx_compatible(struct bio_crypt_ctx *bc1,
				     struct bio_crypt_ctx *bc2)
{
	if (!bc1)
		return !bc2;

	return bc2 && bc1->bc_key == bc2->bc_key;
}

bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio)
{
	return bio_crypt_ctx_compatible(rq->crypt_ctx, bio->bi_crypt_context);
}

/*
 * Checks that two bio crypt contexts are compatible, and also
 * that their data_unit_nums are continuous (and can hence be merged)
 * in the order @bc1 followed by @bc2.
 */
bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
			     struct bio_crypt_ctx *bc2)
{
	if (!bio_crypt_ctx_compatible(bc1, bc2))
		return false;

	return !bc1 || bio_crypt_dun_is_contiguous(bc1, bc1_bytes, bc2->bc_dun);
}

/* Check that all I/O segments are data unit aligned. */
static bool bio_crypt_check_alignment(struct bio *bio)
{
	const unsigned int data_unit_size =
		bio->bi_crypt_context->bc_key->crypto_cfg.data_unit_size;
	struct bvec_iter iter;
	struct bio_vec bv;

	bio_for_each_segment(bv, bio, iter) {
		if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size))
			return false;
	}

	return true;
}

blk_status_t __blk_crypto_init_request(struct request *rq)
{
	return blk_ksm_get_slot_for_key(rq->q->ksm, rq->crypt_ctx->bc_key,
					&rq->crypt_keyslot);
}

/**
 * __blk_crypto_free_request - Uninitialize the crypto fields of a request.
 *
 * @rq: The request whose crypto fields to uninitialize.
 *
 * Completely uninitializes the crypto fields of a request. If a keyslot has
 * been programmed into some inline encryption hardware, that keyslot is
 * released. The rq->crypt_ctx is also freed.
 */
void __blk_crypto_free_request(struct request *rq)
{
	blk_ksm_put_slot(rq->crypt_keyslot);
	mempool_free(rq->crypt_ctx, bio_crypt_ctx_pool);
	blk_crypto_rq_set_defaults(rq);
}

/**
 * __blk_crypto_bio_prep - Prepare bio for inline encryption
 *
 * @bio_ptr: pointer to original bio pointer
 *
 * If the bio crypt context provided for the bio is supported by the underlying
 * device's inline encryption hardware, do nothing.
 *
 * Otherwise, try to perform en/decryption for this bio by falling back to the
 * kernel crypto API. When the crypto API fallback is used for encryption,
 * blk-crypto may choose to split the bio into 2 - the first one that will
 * continue to be processed and the second one that will be resubmitted via
 * submit_bio_noacct. A bounce bio will be allocated to encrypt the contents
 * of the aforementioned "first one", and *bio_ptr will be updated to this
 * bounce bio.
 *
 * Caller must ensure bio has bio_crypt_ctx.
 *
 * Return: true on success; false on error (and bio->bi_status will be set
 *	   appropriately, and bio_endio() will have been called so bio
 *	   submission should abort).
 */
bool __blk_crypto_bio_prep(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	const struct blk_crypto_key *bc_key = bio->bi_crypt_context->bc_key;

	/* Error if bio has no data. */
	if (WARN_ON_ONCE(!bio_has_data(bio))) {
		bio->bi_status = BLK_STS_IOERR;
		goto fail;
	}

	if (!bio_crypt_check_alignment(bio)) {
		bio->bi_status = BLK_STS_IOERR;
		goto fail;
	}

	/*
	 * Success if device supports the encryption context, or if we succeeded
	 * in falling back to the crypto API.
	 */
	if (blk_ksm_crypto_cfg_supported(bio->bi_disk->queue->ksm,
					 &bc_key->crypto_cfg))
		return true;

	if (blk_crypto_fallback_bio_prep(bio_ptr))
		return true;
fail:
	bio_endio(*bio_ptr);
	return false;
}

int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
			     gfp_t gfp_mask)
{
	if (!rq->crypt_ctx) {
		rq->crypt_ctx = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
		if (!rq->crypt_ctx)
			return -ENOMEM;
	}
	*rq->crypt_ctx = *bio->bi_crypt_context;
	return 0;
}

/**
 * blk_crypto_init_key() - Prepare a key for use with blk-crypto
 * @blk_key: Pointer to the blk_crypto_key to initialize.
 * @raw_key: Pointer to the raw key.
 * @raw_key_size: Size of raw key. Must be at least the required size for the
 *		  chosen @crypto_mode; see blk_crypto_modes[]. (It's allowed
 *		  to be longer than the mode's actual key size, in order to
 *		  support inline encryption hardware that accepts wrapped keys.
 *		  @is_hw_wrapped has to be set for such keys)
 * @is_hw_wrapped: Denotes @raw_key is wrapped.
 * @crypto_mode: identifier for the encryption algorithm to use
 * @dun_bytes: number of bytes that will be used to specify the DUN when this
 *	       key is used
 * @data_unit_size: the data unit size to use for en/decryption
 *
 * Return: 0 on success, -errno on failure. The caller is responsible for
 *	   zeroizing both blk_key and raw_key when done with them.
 */
int blk_crypto_init_key(struct blk_crypto_key *blk_key,
			const u8 *raw_key, unsigned int raw_key_size,
			bool is_hw_wrapped,
			enum blk_crypto_mode_num crypto_mode,
			unsigned int dun_bytes,
			unsigned int data_unit_size)
{
	const struct blk_crypto_mode *mode;

	memset(blk_key, 0, sizeof(*blk_key));

	if (crypto_mode >= ARRAY_SIZE(blk_crypto_modes))
		return -EINVAL;

	BUILD_BUG_ON(BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE < BLK_CRYPTO_MAX_KEY_SIZE);

	mode = &blk_crypto_modes[crypto_mode];
	if (is_hw_wrapped) {
		if (raw_key_size < mode->keysize ||
		    raw_key_size > BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE)
			return -EINVAL;
	} else {
		if (raw_key_size != mode->keysize)
			return -EINVAL;
	}

	if (dun_bytes == 0 || dun_bytes > mode->ivsize)
		return -EINVAL;

	if (!is_power_of_2(data_unit_size))
		return -EINVAL;

	blk_key->crypto_cfg.crypto_mode = crypto_mode;
	blk_key->crypto_cfg.dun_bytes = dun_bytes;
	blk_key->crypto_cfg.data_unit_size = data_unit_size;
	blk_key->crypto_cfg.is_hw_wrapped = is_hw_wrapped;
	blk_key->data_unit_size_bits = ilog2(data_unit_size);
	blk_key->size = raw_key_size;
	memcpy(blk_key->raw, raw_key, raw_key_size);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_crypto_init_key);
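
/*
 * Example (a sketch, not from a real caller): preparing a standard,
 * non-hardware-wrapped AES-256-XTS key that encrypts 4096-byte data units
 * and will be used with 8-byte DUNs:
 *
 *	u8 raw_key[64];			// filled in by the caller
 *	struct blk_crypto_key blk_key;
 *	int err;
 *
 *	err = blk_crypto_init_key(&blk_key, raw_key, sizeof(raw_key),
 *				  false, BLK_ENCRYPTION_MODE_AES_256_XTS,
 *				  8, 4096);
 *
 * The 64-byte raw key size matches blk_crypto_modes[] above (XTS uses two
 * AES-256 keys), 8 <= the mode's 16-byte ivsize, and 4096 is a power of 2,
 * so this passes the checks in blk_crypto_init_key().
 */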

/*
 * Check if bios with @cfg can be en/decrypted by blk-crypto (i.e. either the
 * request queue it's submitted to supports inline crypto, or the
 * blk-crypto-fallback is enabled and supports the cfg).
 */
bool blk_crypto_config_supported(struct request_queue *q,
				 const struct blk_crypto_config *cfg)
{
	if (IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) &&
	    !cfg->is_hw_wrapped)
		return true;
	return blk_ksm_crypto_cfg_supported(q->ksm, cfg);
}
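
/*
 * Example (a sketch; the field values are illustrative): a filesystem
 * checking up front whether its chosen settings can be supported for a
 * queue, before deriving any keys:
 *
 *	struct blk_crypto_config cfg = {
 *		.crypto_mode = BLK_ENCRYPTION_MODE_AES_256_XTS,
 *		.data_unit_size = 4096,
 *		.dun_bytes = 8,
 *		.is_hw_wrapped = false,
 *	};
 *
 *	if (!blk_crypto_config_supported(q, &cfg))
 *		return -EOPNOTSUPP;	// or fall back to fs-layer encryption
 */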

/**
 * blk_crypto_start_using_key() - Start using a blk_crypto_key on a device
 * @key: A key to use on the device
 * @q: the request queue for the device
 *
 * Upper layers must call this function to ensure that either the hardware
 * supports the key's crypto settings, or the crypto API fallback has transforms
 * for the needed mode allocated and ready to go. This function may allocate
 * an skcipher, and *should not* be called from the data path, since that might
 * cause a deadlock.
 *
 * Return: 0 on success; -ENOPKG if the hardware doesn't support the key and
 *	   blk-crypto-fallback is either disabled or the needed algorithm
 *	   is disabled in the crypto API; or another -errno code.
 */
int blk_crypto_start_using_key(const struct blk_crypto_key *key,
			       struct request_queue *q)
{
	if (blk_ksm_crypto_cfg_supported(q->ksm, &key->crypto_cfg))
		return 0;
	if (key->crypto_cfg.is_hw_wrapped) {
		pr_warn_once("hardware doesn't support wrapped keys\n");
		return -EOPNOTSUPP;
	}
	return blk_crypto_fallback_start_using_mode(key->crypto_cfg.crypto_mode);
}
EXPORT_SYMBOL_GPL(blk_crypto_start_using_key);

/**
 * blk_crypto_evict_key() - Evict a key from any inline encryption hardware
 *			    it may have been programmed into
 * @q: The request queue whose associated inline encryption hardware this key
 *     might have been programmed into
 * @key: The key to evict
 *
 * Upper layers (filesystems) must call this function to ensure that a key is
 * evicted from any hardware that it might have been programmed into. The key
 * must not be in use by any in-flight IO when this function is called.
 *
 * Return: 0 on success or if the key is not present in the q's ksm; -errno on
 *	   error.
 */
int blk_crypto_evict_key(struct request_queue *q,
			 const struct blk_crypto_key *key)
{
	if (blk_ksm_crypto_cfg_supported(q->ksm, &key->crypto_cfg))
		return blk_ksm_evict_key(q->ksm, key);

	/*
	 * If the request queue's associated inline encryption hardware didn't
	 * have support for the key, then the key might have been programmed
	 * into the fallback keyslot manager, so try to evict from there.
	 */
	return blk_crypto_fallback_evict_key(key);
}
EXPORT_SYMBOL_GPL(blk_crypto_evict_key);
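
/*
 * Example (a sketch, assuming a filesystem unmount path): after all I/O
 * using the key has completed, evict it from any hardware keyslots and then
 * wipe the in-memory copy:
 *
 *	int err = blk_crypto_evict_key(bdev_get_queue(bdev), &blk_key);
 *
 *	if (err)
 *		pr_warn("failed to evict blk-crypto key (%d)\n", err);
 *	memzero_explicit(&blk_key, sizeof(blk_key));
 *
 * "bdev" and "blk_key" are hypothetical variables owned by the caller;
 * bdev_get_queue() and memzero_explicit() are standard kernel helpers.
 */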