^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) * Copyright (C) 2003 Jana Saout <jana@saout.de>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Copyright (C) 2006-2020 Red Hat, Inc. All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (C) 2013-2020 Milan Broz <gmazyland@gmail.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * This file is released under the GPL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/completion.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/err.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/key.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/bio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/blkdev.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/mempool.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/crypto.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/workqueue.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/kthread.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/backing-dev.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/atomic.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/scatterlist.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <linux/rbtree.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <linux/ctype.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <asm/page.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <asm/unaligned.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <crypto/hash.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <crypto/md5.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include <crypto/algapi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include <crypto/skcipher.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #include <crypto/aead.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #include <crypto/authenc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #include <linux/rtnetlink.h> /* for struct rtattr and RTA macros only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #include <linux/key-type.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #include <keys/user-type.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #include <keys/encrypted-type.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #include <linux/device-mapper.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #define DM_MSG_PREFIX "crypt"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) * context holding the current state of a multi-part conversion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) struct convert_context {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) struct completion restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) struct bio *bio_in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) struct bio *bio_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) struct bvec_iter iter_in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) struct bvec_iter iter_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) u64 cc_sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) atomic_t cc_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) union {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) struct skcipher_request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) struct aead_request *req_aead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) } r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) * per bio private data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) struct dm_crypt_io {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) struct crypt_config *cc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) struct bio *base_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) u8 *integrity_metadata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) bool integrity_metadata_from_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) struct work_struct work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) struct tasklet_struct tasklet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) struct convert_context ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) atomic_t io_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) blk_status_t error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) sector_t sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) struct rb_node rb_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) } CRYPTO_MINALIGN_ATTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) struct dm_crypt_request {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) struct convert_context *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) struct scatterlist sg_in[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) struct scatterlist sg_out[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) u64 iv_sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) struct crypt_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) struct crypt_iv_operations {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) const char *opts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) void (*dtr)(struct crypt_config *cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) int (*init)(struct crypt_config *cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) int (*wipe)(struct crypt_config *cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) int (*generator)(struct crypt_config *cc, u8 *iv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) struct dm_crypt_request *dmreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) int (*post)(struct crypt_config *cc, u8 *iv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) struct dm_crypt_request *dmreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) struct iv_benbi_private {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) int shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) #define LMK_SEED_SIZE 64 /* hash + 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) struct iv_lmk_private {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) struct crypto_shash *hash_tfm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) u8 *seed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) #define TCW_WHITENING_SIZE 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) struct iv_tcw_private {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) struct crypto_shash *crc32_tfm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) u8 *iv_seed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) u8 *whitening;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) #define ELEPHANT_MAX_KEY_SIZE 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) struct iv_elephant_private {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) struct crypto_skcipher *tfm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) * Crypt: maps a linear range of a block device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) * and encrypts / decrypts at the same time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) DM_CRYPT_NO_READ_WORKQUEUE, DM_CRYPT_NO_WRITE_WORKQUEUE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) DM_CRYPT_WRITE_INLINE };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) enum cipher_flags {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) CRYPT_MODE_INTEGRITY_AEAD, /* Use authenticated mode for cipher */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) CRYPT_IV_LARGE_SECTORS, /* Calculate IV from sector_size, not 512B sectors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) CRYPT_ENCRYPT_PREPROCESS, /* Must preprocess data for encryption (elephant) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) * The fields in here must be read only after initialization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) struct crypt_config {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) struct dm_dev *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) sector_t start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) struct percpu_counter n_allocated_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) struct workqueue_struct *io_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) struct workqueue_struct *crypt_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) spinlock_t write_thread_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) struct task_struct *write_thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) struct rb_root write_tree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) char *cipher_string;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) char *cipher_auth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) char *key_string;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) const struct crypt_iv_operations *iv_gen_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) union {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) struct iv_benbi_private benbi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) struct iv_lmk_private lmk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) struct iv_tcw_private tcw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) struct iv_elephant_private elephant;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) } iv_gen_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) u64 iv_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) unsigned int iv_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) unsigned short int sector_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) unsigned char sector_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) union {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) struct crypto_skcipher **tfms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) struct crypto_aead **tfms_aead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) } cipher_tfm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) unsigned tfms_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) unsigned long cipher_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) * Layout of each crypto request:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) * struct skcipher_request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) * context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) * padding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) * struct dm_crypt_request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) * padding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) * IV
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) * The padding is added so that dm_crypt_request and the IV are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) * correctly aligned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) */
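/*
 * A rough sketch (an assumption, not code from this section) of how
 * dmreq_start would typically be derived for the skcipher case in the
 * target constructor:
 *
 *   dmreq_start = sizeof(struct skcipher_request);
 *   dmreq_start += crypto_skcipher_reqsize(any_tfm(cc));
 *   dmreq_start = ALIGN(dmreq_start, __alignof__(struct dm_crypt_request));
 *
 * The IV then follows struct dm_crypt_request, rounded up to the
 * cipher's alignment requirement.
 */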
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) unsigned int dmreq_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) unsigned int per_bio_data_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) unsigned int key_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) unsigned int key_parts; /* independent parts in key buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) unsigned int key_extra_size; /* additional keys length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) unsigned int key_mac_size; /* MAC key size for authenc(...) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) unsigned int integrity_tag_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) unsigned int integrity_iv_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) unsigned int on_disk_tag_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) * pool for per bio private data, crypto requests,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) * encryption requests/buffer pages and integrity tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) unsigned tag_pool_max_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) mempool_t tag_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) mempool_t req_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) mempool_t page_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) struct bio_set bs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) struct mutex bio_alloc_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) u8 *authenc_key; /* space for keys in authenc() format (if used) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) u8 key[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) #define MIN_IOS 64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) #define MAX_TAG_SIZE 480
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) #define POOL_ENTRY_SIZE 512
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) static DEFINE_SPINLOCK(dm_crypt_clients_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) static unsigned dm_crypt_clients_n = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) static volatile unsigned long dm_crypt_pages_per_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) #define DM_CRYPT_MEMORY_PERCENT 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) #define DM_CRYPT_MIN_PAGES_PER_CLIENT (BIO_MAX_PAGES * 16)
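/*
 * Intent of the limits above (a sketch; the actual accounting lives in the
 * constructor and page allocator): all dm-crypt instances together may keep
 * roughly DM_CRYPT_MEMORY_PERCENT of system memory in their page pools,
 * split evenly between clients, but never less than
 * DM_CRYPT_MIN_PAGES_PER_CLIENT pages per client.
 */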
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) static void clone_init(struct dm_crypt_io *, struct bio *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) static void kcryptd_queue_crypt(struct dm_crypt_io *io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) struct scatterlist *sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) static bool crypt_integrity_aead(struct crypt_config *cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) * Use this to access cipher attributes that are independent of the key.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) static struct crypto_skcipher *any_tfm(struct crypt_config *cc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) return cc->cipher_tfm.tfms[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) static struct crypto_aead *any_tfm_aead(struct crypt_config *cc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) return cc->cipher_tfm.tfms_aead[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) * Different IV generation algorithms:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) * plain: the initial vector is the 32-bit little-endian version of the sector
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) * number, padded with zeros if necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) * plain64: the initial vector is the 64-bit little-endian version of the sector
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) * number, padded with zeros if necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) * plain64be: the initial vector is the 64-bit big-endian version of the sector
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) * number, padded with zeros if necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) * essiv: "encrypted sector|salt initial vector", the sector number is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) * encrypted with the bulk cipher using a salt as key. The salt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) * should be derived from the bulk cipher's key via hashing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) * (needed for LRW-32-AES and possibly other narrow block modes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) * null: the initial vector is always zero. Provides compatibility with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) * obsolete loop_fish2 devices. Do not use for new devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) * lmk: Compatible implementation of the block chaining mode used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) * by the Loop-AES block device encryption system
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) * designed by Jari Ruusu. See http://loop-aes.sourceforge.net/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) * It operates on full 512 byte sectors and uses CBC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) * with an IV derived from the sector number, the data and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) * optionally extra IV seed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) * This means that after decryption the first block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) * of the sector must be tweaked according to the decrypted data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) * Loop-AES can use three encryption schemes:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) * version 1: plain aes-cbc mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) * version 2: uses a 64-key (multikey) scheme with the lmk IV generator
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) * version 3: the same as version 2 with an additional IV seed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) * (it uses 65 keys; the last key is used as the IV seed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) * tcw: Compatible implementation of the block chaining mode used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) * by the TrueCrypt device encryption system (prior to version 4.1).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) * For more info see: https://gitlab.com/cryptsetup/cryptsetup/wikis/TrueCryptOnDiskFormat
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) * It operates on full 512 byte sectors and uses CBC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) * with an IV derived from initial key and the sector number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) * In addition, a whitening value is applied to every sector; the whitening
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) * is calculated from the initial key and the sector number and mixed using CRC32.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) * Note that this encryption scheme is vulnerable to watermarking attacks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) * and should only be used to access old, compatible containers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) * eboiv: Encrypted byte-offset IV (used in BitLocker in CBC mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) * The IV is the encrypted little-endian byte offset of the sector
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) * (encrypted with the same key and cipher as the volume).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) * elephant: The extended version of eboiv with an additional Elephant diffuser,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) * used with BitLocker CBC mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) * This mode was used in older Windows systems
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) * https://download.microsoft.com/download/0/2/3/0238acaf-d3bf-4a6d-b3d6-0a0be4bbb36e/bitlockercipher200608.pdf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) struct dm_crypt_request *dmreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) memset(iv, 0, cc->iv_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) *(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) struct dm_crypt_request *dmreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) memset(iv, 0, cc->iv_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) static int crypt_iv_plain64be_gen(struct crypt_config *cc, u8 *iv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) struct dm_crypt_request *dmreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) memset(iv, 0, cc->iv_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) /* iv_size is at least the size of u64; usually it is 16 bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) *(__be64 *)&iv[cc->iv_size - sizeof(u64)] = cpu_to_be64(dmreq->iv_sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) struct dm_crypt_request *dmreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) * ESSIV encryption of the IV is now handled by the crypto API,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) * so just pass the plain sector number here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) memset(iv, 0, cc->iv_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) const char *opts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) unsigned bs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) int log;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) if (crypt_integrity_aead(cc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) bs = crypto_aead_blocksize(any_tfm_aead(cc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) bs = crypto_skcipher_blocksize(any_tfm(cc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) log = ilog2(bs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) /* we need to calculate how far we must shift the sector count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) * to get the cipher block count; we use this shift in _gen */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) if (1 << log != bs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) ti->error = "cipher blocksize is not a power of 2";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) if (log > 9) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) ti->error = "cipher blocksize is > 512";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) cc->iv_gen_private.benbi.shift = 9 - log;
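/*
 * Worked example (illustrative only): AES has a 16-byte block size, so
 * log = 4 and shift = 9 - 4 = 5; crypt_iv_benbi_gen() then maps a 512B
 * sector number to (sector << 5) + 1, the 1-based count of 16-byte
 * cipher blocks.
 */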
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) static void crypt_iv_benbi_dtr(struct crypt_config *cc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) struct dm_crypt_request *dmreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) __be64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is set by put_unaligned() below */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) struct dm_crypt_request *dmreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) memset(iv, 0, cc->iv_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) static void crypt_iv_lmk_dtr(struct crypt_config *cc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) crypto_free_shash(lmk->hash_tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) lmk->hash_tfm = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) kfree_sensitive(lmk->seed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) lmk->seed = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) const char *opts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) if (cc->sector_size != (1 << SECTOR_SHIFT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) ti->error = "Unsupported sector size for LMK";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) lmk->hash_tfm = crypto_alloc_shash("md5", 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) CRYPTO_ALG_ALLOCATES_MEMORY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) if (IS_ERR(lmk->hash_tfm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) ti->error = "Error initializing LMK hash";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) return PTR_ERR(lmk->hash_tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) /* No seed in LMK version 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) if (cc->key_parts == cc->tfms_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) lmk->seed = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) if (!lmk->seed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) crypt_iv_lmk_dtr(cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) ti->error = "Error kmallocing seed storage in LMK";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) static int crypt_iv_lmk_init(struct crypt_config *cc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) int subkey_size = cc->key_size / cc->key_parts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) /* The LMK seed is stored at the position of the (LMK_KEYS + 1)th key */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) if (lmk->seed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) crypto_shash_digestsize(lmk->hash_tfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) static int crypt_iv_lmk_wipe(struct crypt_config *cc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) if (lmk->seed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) memset(lmk->seed, 0, LMK_SEED_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472)
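/*
 * Compute the LMK IV for one 512B sector: an MD5 state (exported without
 * final padding) over the optional seed, blocks 1-31 of the sector data
 * and a trailing block encoding the sector number (cropped to 56 bits).
 */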
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) struct dm_crypt_request *dmreq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) u8 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) SHASH_DESC_ON_STACK(desc, lmk->hash_tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) struct md5_state md5state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) __le32 buf[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) int i, r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) desc->tfm = lmk->hash_tfm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) r = crypto_shash_init(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) if (lmk->seed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) r = crypto_shash_update(desc, lmk->seed, LMK_SEED_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) /* Sector is always 512B, block size 16, add data of blocks 1-31 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) r = crypto_shash_update(desc, data + 16, 16 * 31);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) /* Sector is cropped to 56 bits here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) buf[2] = cpu_to_le32(4024);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) buf[3] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) r = crypto_shash_update(desc, (u8 *)buf, sizeof(buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) /* No MD5 padding here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) r = crypto_shash_export(desc, &md5state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) for (i = 0; i < MD5_HASH_WORDS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) __cpu_to_le32s(&md5state.hash[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) memcpy(iv, &md5state.hash, cc->iv_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) struct dm_crypt_request *dmreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) u8 *src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) int r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) sg = crypt_get_sg_data(cc, dmreq->sg_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) src = kmap_atomic(sg_page(sg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) r = crypt_iv_lmk_one(cc, iv, dmreq, src + sg->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) kunmap_atomic(src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) memset(iv, 0, cc->iv_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538)
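/*
 * On reads, recompute the IV from the decrypted data and XOR it into the
 * first cipher block of the sector, undoing the tweak that was applied
 * when the sector was encrypted (see the lmk description above).
 */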
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) struct dm_crypt_request *dmreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) u8 *dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) sg = crypt_get_sg_data(cc, dmreq->sg_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) dst = kmap_atomic(sg_page(sg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) r = crypt_iv_lmk_one(cc, iv, dmreq, dst + sg->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) /* Tweak the first block of plaintext sector */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) if (!r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) crypto_xor(dst + sg->offset, iv, cc->iv_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) kunmap_atomic(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) static void crypt_iv_tcw_dtr(struct crypt_config *cc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) kfree_sensitive(tcw->iv_seed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) tcw->iv_seed = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) kfree_sensitive(tcw->whitening);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) tcw->whitening = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) crypto_free_shash(tcw->crc32_tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) tcw->crc32_tfm = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) const char *opts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) if (cc->sector_size != (1 << SECTOR_SHIFT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) ti->error = "Unsupported sector size for TCW";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) ti->error = "Wrong key size for TCW";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) tcw->crc32_tfm = crypto_alloc_shash("crc32", 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) CRYPTO_ALG_ALLOCATES_MEMORY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) if (IS_ERR(tcw->crc32_tfm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) ti->error = "Error initializing CRC32 in TCW";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) return PTR_ERR(tcw->crc32_tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) if (!tcw->iv_seed || !tcw->whitening) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) crypt_iv_tcw_dtr(cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) ti->error = "Error allocating seed storage in TCW";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) static int crypt_iv_tcw_init(struct crypt_config *cc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) TCW_WHITENING_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) static int crypt_iv_tcw_wipe(struct crypt_config *cc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) memset(tcw->iv_seed, 0, cc->iv_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) memset(tcw->whitening, 0, TCW_WHITENING_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629)
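/*
 * Derive the per-sector whitening block: XOR the 16-byte whitening seed
 * with the sector number, replace each 32-bit word with its CRC32, fold
 * the 16 bytes down to 8 and XOR those 8 bytes over the whole 512B sector.
 */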
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) static int crypt_iv_tcw_whitening(struct crypt_config *cc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) struct dm_crypt_request *dmreq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) u8 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) __le64 sector = cpu_to_le64(dmreq->iv_sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) u8 buf[TCW_WHITENING_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) int i, r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) /* xor whitening with sector number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) crypto_xor_cpy(buf, tcw->whitening, (u8 *)&sector, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) crypto_xor_cpy(&buf[8], tcw->whitening + 8, (u8 *)&sector, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) /* calculate crc32 for every 32bit part and xor it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) desc->tfm = tcw->crc32_tfm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) for (i = 0; i < 4; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) r = crypto_shash_init(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) r = crypto_shash_update(desc, &buf[i * 4], 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) r = crypto_shash_final(desc, &buf[i * 4]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) crypto_xor(&buf[0], &buf[12], 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) crypto_xor(&buf[4], &buf[8], 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) /* apply whitening (8 bytes) to whole sector */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) crypto_xor(data + i * 8, buf, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) memzero_explicit(buf, sizeof(buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) struct dm_crypt_request *dmreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) __le64 sector = cpu_to_le64(dmreq->iv_sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) u8 *src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) int r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) /* Remove whitening from ciphertext */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) sg = crypt_get_sg_data(cc, dmreq->sg_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) src = kmap_atomic(sg_page(sg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) r = crypt_iv_tcw_whitening(cc, dmreq, src + sg->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) kunmap_atomic(src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) /* Calculate IV */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) crypto_xor_cpy(iv, tcw->iv_seed, (u8 *)&sector, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) if (cc->iv_size > 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) crypto_xor_cpy(&iv[8], tcw->iv_seed + 8, (u8 *)&sector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) cc->iv_size - 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) struct dm_crypt_request *dmreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) u8 *dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) /* Apply whitening on ciphertext */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) sg = crypt_get_sg_data(cc, dmreq->sg_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) dst = kmap_atomic(sg_page(sg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) r = crypt_iv_tcw_whitening(cc, dmreq, dst + sg->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) kunmap_atomic(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) static int crypt_iv_random_gen(struct crypt_config *cc, u8 *iv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) struct dm_crypt_request *dmreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) /* Used only for writes; there must be additional space to store the IV */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) get_random_bytes(iv, cc->iv_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) static int crypt_iv_eboiv_ctr(struct crypt_config *cc, struct dm_target *ti,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) const char *opts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) if (crypt_integrity_aead(cc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) ti->error = "AEAD transforms not supported for EBOIV";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) if (crypto_skcipher_blocksize(any_tfm(cc)) != cc->iv_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) ti->error = "Block size of EBOIV cipher does "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) "not match IV size of block cipher";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737)
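/*
 * Generate the EBOIV by encrypting one zero block with the little-endian
 * byte offset of the sector as the IV; for CBC this equals encrypting the
 * byte offset itself, so the volume's own tfm can be reused directly.
 */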
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) struct dm_crypt_request *dmreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) u8 buf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(__le64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) struct skcipher_request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) struct scatterlist src, dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) DECLARE_CRYPTO_WAIT(wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) req = skcipher_request_alloc(any_tfm(cc), GFP_NOIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) if (!req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) memset(buf, 0, cc->iv_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) *(__le64 *)buf = cpu_to_le64(dmreq->iv_sector * cc->sector_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) sg_init_one(&src, page_address(ZERO_PAGE(0)), cc->iv_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) sg_init_one(&dst, iv, cc->iv_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) skcipher_request_set_crypt(req, &src, &dst, cc->iv_size, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) skcipher_request_free(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) static void crypt_iv_elephant_dtr(struct crypt_config *cc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) crypto_free_skcipher(elephant->tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) elephant->tfm = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) static int crypt_iv_elephant_ctr(struct crypt_config *cc, struct dm_target *ti,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) const char *opts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) elephant->tfm = crypto_alloc_skcipher("ecb(aes)", 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) CRYPTO_ALG_ALLOCATES_MEMORY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) if (IS_ERR(elephant->tfm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) r = PTR_ERR(elephant->tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) elephant->tfm = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) r = crypt_iv_eboiv_ctr(cc, ti, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) crypt_iv_elephant_dtr(cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791)
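/*
 * The Elephant diffusers operate on 32-bit little-endian words; these two
 * helpers are no-ops on little-endian hosts and byte-swap in place otherwise.
 */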
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) static void diffuser_disk_to_cpu(u32 *d, size_t n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) #ifndef __LITTLE_ENDIAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) for (i = 0; i < n; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) d[i] = le32_to_cpu((__le32)d[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) static void diffuser_cpu_to_disk(__le32 *d, size_t n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) #ifndef __LITTLE_ENDIAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) for (i = 0; i < n; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) d[i] = cpu_to_le32((u32)d[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
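/*
 * Diffuser A: five passes over the sector.  In the "decrypt" direction each
 * word is updated as d[i] += d[(i - 2) mod n] ^ rol32(d[(i - 5) mod n], R),
 * with the rotation R cycling through 9, 0, 13, 0.  The "encrypt" direction
 * below subtracts the same terms while walking the words in reverse order,
 * so the two routines are exact inverses of each other.
 */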
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) static void diffuser_a_decrypt(u32 *d, size_t n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) int i, i1, i2, i3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) for (i = 0; i < 5; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) i1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) i2 = n - 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) i3 = n - 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) while (i1 < (n - 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) d[i1] += d[i2] ^ (d[i3] << 9 | d[i3] >> 23);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) i1++; i2++; i3++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) if (i3 >= n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) i3 -= n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) d[i1] += d[i2] ^ d[i3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) i1++; i2++; i3++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) if (i2 >= n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) i2 -= n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) d[i1] += d[i2] ^ (d[i3] << 13 | d[i3] >> 19);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) i1++; i2++; i3++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) d[i1] += d[i2] ^ d[i3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) i1++; i2++; i3++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) static void diffuser_a_encrypt(u32 *d, size_t n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) int i, i1, i2, i3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) for (i = 0; i < 5; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) i1 = n - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) i2 = n - 2 - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) i3 = n - 5 - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) while (i1 > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) d[i1] -= d[i2] ^ d[i3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) i1--; i2--; i3--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) d[i1] -= d[i2] ^ (d[i3] << 13 | d[i3] >> 19);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) i1--; i2--; i3--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) if (i2 < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) i2 += n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) d[i1] -= d[i2] ^ d[i3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) i1--; i2--; i3--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) if (i3 < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) i3 += n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) d[i1] -= d[i2] ^ (d[i3] << 9 | d[i3] >> 23);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) i1--; i2--; i3--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
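/*
 * Diffuser B: three passes.  In the "decrypt" direction each word is updated
 * as d[i] += d[(i + 2) mod n] ^ rol32(d[(i + 5) mod n], R), with R cycling
 * through 0, 10, 0, 25.  As with diffuser A, the "encrypt" direction runs
 * the inverse (subtracting) update in reverse order.
 */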
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) static void diffuser_b_decrypt(u32 *d, size_t n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) int i, i1, i2, i3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) for (i = 0; i < 3; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) i1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) i2 = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) i3 = 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) while (i1 < (n - 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) d[i1] += d[i2] ^ d[i3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) i1++; i2++; i3++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) d[i1] += d[i2] ^ (d[i3] << 10 | d[i3] >> 22);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) i1++; i2++; i3++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) if (i2 >= n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) i2 -= n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) d[i1] += d[i2] ^ d[i3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) i1++; i2++; i3++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) if (i3 >= n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) i3 -= n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) d[i1] += d[i2] ^ (d[i3] << 25 | d[i3] >> 7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) i1++; i2++; i3++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) static void diffuser_b_encrypt(u32 *d, size_t n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) int i, i1, i2, i3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) for (i = 0; i < 3; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) i1 = n - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) i2 = 2 - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) i3 = 5 - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) while (i1 > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) d[i1] -= d[i2] ^ (d[i3] << 25 | d[i3] >> 7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) i1--; i2--; i3--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) if (i3 < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) i3 += n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) d[i1] -= d[i2] ^ d[i3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) i1--; i2--; i3--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) if (i2 < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) i2 += n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) d[i1] -= d[i2] ^ (d[i3] << 10 | d[i3] >> 22);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) i1--; i2--; i3--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) d[i1] -= d[i2] ^ d[i3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) i1--; i2--; i3--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
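/*
 * Apply or remove the Elephant diffuser on the data in sg_out.
 *
 * A 32-byte sector key Ks is derived with the dedicated ECB(AES) tfm by
 * encrypting two 16-byte blocks: e(s), the little-endian byte offset of the
 * sector, and e'(s), the same block with its last byte set to 0x80.  Ks is
 * then XORed repeatedly over the whole sector.  On WRITEs (called from the
 * IV generator, before the sector is encrypted) the plaintext is first
 * copied from sg_in to sg_out and diffusers A and B are applied after the
 * XOR; on READs (called from ->post, after the sector has been decrypted)
 * diffusers B and A are removed before the XOR, recovering the plaintext
 * in place.
 */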
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) static int crypt_iv_elephant(struct crypt_config *cc, struct dm_crypt_request *dmreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) u8 *es, *ks, *data, *data2, *data_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) struct skcipher_request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) struct scatterlist *sg, *sg2, src, dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) DECLARE_CRYPTO_WAIT(wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) int i, r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) req = skcipher_request_alloc(elephant->tfm, GFP_NOIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) es = kzalloc(16, GFP_NOIO); /* Key for AES */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) ks = kzalloc(32, GFP_NOIO); /* Elephant sector key */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) if (!req || !es || !ks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) r = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) *(__le64 *)es = cpu_to_le64(dmreq->iv_sector * cc->sector_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) /* E(Ks, e(s)) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) sg_init_one(&src, es, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) sg_init_one(&dst, ks, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) skcipher_request_set_crypt(req, &src, &dst, 16, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) r = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) /* E(Ks, e'(s)) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) es[15] = 0x80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) sg_init_one(&dst, &ks[16], 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) r = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) sg = crypt_get_sg_data(cc, dmreq->sg_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) data = kmap_atomic(sg_page(sg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) data_offset = data + sg->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) /* Cannot modify original bio, copy to sg_out and apply Elephant to it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) sg2 = crypt_get_sg_data(cc, dmreq->sg_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) data2 = kmap_atomic(sg_page(sg2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) memcpy(data_offset, data2 + sg2->offset, cc->sector_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) kunmap_atomic(data2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) diffuser_disk_to_cpu((u32 *)data_offset, cc->sector_size / sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) diffuser_b_decrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) diffuser_a_decrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) diffuser_cpu_to_disk((__le32 *)data_offset, cc->sector_size / sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) for (i = 0; i < (cc->sector_size / 32); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) crypto_xor(data_offset + i * 32, ks, 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) diffuser_disk_to_cpu((u32 *)data_offset, cc->sector_size / sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) diffuser_a_encrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) diffuser_b_encrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) diffuser_cpu_to_disk((__le32 *)data_offset, cc->sector_size / sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) kunmap_atomic(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) kfree_sensitive(ks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) kfree_sensitive(es);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) skcipher_request_free(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
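/*
 * The IV itself is produced by eboiv; the Elephant diffuser runs as a
 * pre-processing step on WRITEs (from the generator, so the cipher sees the
 * diffused data) and as a post-processing step on READs (from ->post, once
 * the data has been decrypted).
 */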
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) static int crypt_iv_elephant_gen(struct crypt_config *cc, u8 *iv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) struct dm_crypt_request *dmreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) r = crypt_iv_elephant(cc, dmreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) return crypt_iv_eboiv_gen(cc, iv, dmreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) static int crypt_iv_elephant_post(struct crypt_config *cc, u8 *iv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) struct dm_crypt_request *dmreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) return crypt_iv_elephant(cc, dmreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) static int crypt_iv_elephant_init(struct crypt_config *cc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) int key_offset = cc->key_size - cc->key_extra_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) return crypto_skcipher_setkey(elephant->tfm, &cc->key[key_offset], cc->key_extra_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) static int crypt_iv_elephant_wipe(struct crypt_config *cc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) u8 key[ELEPHANT_MAX_KEY_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) memset(key, 0, cc->key_extra_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) return crypto_skcipher_setkey(elephant->tfm, key, cc->key_extra_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) static const struct crypt_iv_operations crypt_iv_plain_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) .generator = crypt_iv_plain_gen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) static const struct crypt_iv_operations crypt_iv_plain64_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) .generator = crypt_iv_plain64_gen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) static const struct crypt_iv_operations crypt_iv_plain64be_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) .generator = crypt_iv_plain64be_gen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) static const struct crypt_iv_operations crypt_iv_essiv_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) .generator = crypt_iv_essiv_gen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) static const struct crypt_iv_operations crypt_iv_benbi_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) .ctr = crypt_iv_benbi_ctr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) .dtr = crypt_iv_benbi_dtr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) .generator = crypt_iv_benbi_gen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) static const struct crypt_iv_operations crypt_iv_null_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) .generator = crypt_iv_null_gen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) static const struct crypt_iv_operations crypt_iv_lmk_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) .ctr = crypt_iv_lmk_ctr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) .dtr = crypt_iv_lmk_dtr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) .init = crypt_iv_lmk_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) .wipe = crypt_iv_lmk_wipe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) .generator = crypt_iv_lmk_gen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) .post = crypt_iv_lmk_post
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) static const struct crypt_iv_operations crypt_iv_tcw_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) .ctr = crypt_iv_tcw_ctr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) .dtr = crypt_iv_tcw_dtr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) .init = crypt_iv_tcw_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) .wipe = crypt_iv_tcw_wipe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) .generator = crypt_iv_tcw_gen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) .post = crypt_iv_tcw_post
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) static const struct crypt_iv_operations crypt_iv_random_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) .generator = crypt_iv_random_gen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) static const struct crypt_iv_operations crypt_iv_eboiv_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) .ctr = crypt_iv_eboiv_ctr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) .generator = crypt_iv_eboiv_gen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) static const struct crypt_iv_operations crypt_iv_elephant_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) .ctr = crypt_iv_elephant_ctr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) .dtr = crypt_iv_elephant_dtr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) .init = crypt_iv_elephant_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) .wipe = crypt_iv_elephant_wipe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) .generator = crypt_iv_elephant_gen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) .post = crypt_iv_elephant_post
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) * Integrity extensions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) static bool crypt_integrity_aead(struct crypt_config *cc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) return test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) static bool crypt_integrity_hmac(struct crypt_config *cc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) return crypt_integrity_aead(cc) && cc->key_mac_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) /* Get sg containing data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) struct scatterlist *sg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) if (unlikely(crypt_integrity_aead(cc)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) return &sg[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) return sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
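/*
 * Attach a bio integrity payload pointing into io->integrity_metadata,
 * sized as one on-disk tag per crypt sector covered by the bio.  Bios that
 * carry no data, or targets without on-disk tags, need no payload.
 */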
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) struct bio_integrity_payload *bip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) unsigned int tag_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) if (!bio_sectors(bio) || !io->cc->on_disk_tag_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) if (IS_ERR(bip))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) return PTR_ERR(bip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) bip->bip_iter.bi_size = tag_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) bip->bip_iter.bi_sector = io->cc->start + io->sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) ret = bio_integrity_add_page(bio, virt_to_page(io->integrity_metadata),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) tag_len, offset_in_page(io->integrity_metadata));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) if (unlikely(ret != tag_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) #ifdef CONFIG_BLK_DEV_INTEGRITY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) struct blk_integrity *bi = blk_get_integrity(cc->dev->bdev->bd_disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) struct mapped_device *md = dm_table_get_md(ti->table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) /* From now on, we require an underlying device with our integrity profile */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) if (!bi || strcasecmp(bi->profile->name, "DM-DIF-EXT-TAG")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) ti->error = "Integrity profile not supported.";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) if (bi->tag_size != cc->on_disk_tag_size ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) bi->tuple_size != cc->on_disk_tag_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) ti->error = "Integrity profile tag size mismatch.";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) if (1 << bi->interval_exp != cc->sector_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) ti->error = "Integrity profile sector size mismatch.";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) if (crypt_integrity_aead(cc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) cc->integrity_tag_size = cc->on_disk_tag_size - cc->integrity_iv_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) DMDEBUG("%s: Integrity AEAD, tag size %u, IV size %u.", dm_device_name(md),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) cc->integrity_tag_size, cc->integrity_iv_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) if (crypto_aead_setauthsize(any_tfm_aead(cc), cc->integrity_tag_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) ti->error = "Integrity AEAD auth tag size is not supported.";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) } else if (cc->integrity_iv_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) DMDEBUG("%s: Additional per-sector space %u bytes for IV.", dm_device_name(md),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) cc->integrity_iv_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) if ((cc->integrity_tag_size + cc->integrity_iv_size) != bi->tag_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) ti->error = "Not enough space for integrity tag in the profile.";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) ti->error = "Integrity profile not supported.";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) static void crypt_convert_init(struct crypt_config *cc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) struct convert_context *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) struct bio *bio_out, struct bio *bio_in,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) sector_t sector)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) ctx->bio_in = bio_in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) ctx->bio_out = bio_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) if (bio_in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) ctx->iter_in = bio_in->bi_iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) if (bio_out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) ctx->iter_out = bio_out->bi_iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) ctx->cc_sector = sector + cc->iv_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) init_completion(&ctx->restart);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) void *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) static void *req_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) return (void *)((char *)dmreq - cc->dmreq_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
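/*
 * Layout of the area that follows each request allocated from cc->req_pool,
 * as carved up by the accessors below: struct dm_crypt_request sits at
 * cc->dmreq_start past the crypto request itself, followed by the working
 * IV (aligned for the tfm), the original IV, the original sector number
 * (__le64) and the tag offset (unsigned int).  The on-disk tag (and the IV
 * stored with it, if any) lives in io->integrity_metadata and is reached
 * through that offset.
 */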
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) static u8 *iv_of_dmreq(struct crypt_config *cc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) struct dm_crypt_request *dmreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) if (crypt_integrity_aead(cc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) return (u8 *)ALIGN((unsigned long)(dmreq + 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) crypto_aead_alignmask(any_tfm_aead(cc)) + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) return (u8 *)ALIGN((unsigned long)(dmreq + 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) crypto_skcipher_alignmask(any_tfm(cc)) + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) static u8 *org_iv_of_dmreq(struct crypt_config *cc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) struct dm_crypt_request *dmreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) return iv_of_dmreq(cc, dmreq) + cc->iv_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) static __le64 *org_sector_of_dmreq(struct crypt_config *cc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) struct dm_crypt_request *dmreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size + cc->iv_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) return (__le64 *) ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) static unsigned int *org_tag_of_dmreq(struct crypt_config *cc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) struct dm_crypt_request *dmreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) cc->iv_size + sizeof(uint64_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) return (unsigned int *)ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) static void *tag_from_dmreq(struct crypt_config *cc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) struct dm_crypt_request *dmreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) struct convert_context *ctx = dmreq->ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) return &io->integrity_metadata[*org_tag_of_dmreq(cc, dmreq) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) cc->on_disk_tag_size];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) static void *iv_tag_from_dmreq(struct crypt_config *cc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) struct dm_crypt_request *dmreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) return tag_from_dmreq(cc, dmreq) + cc->integrity_tag_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) static int crypt_convert_block_aead(struct crypt_config *cc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) struct convert_context *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) struct aead_request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) unsigned int tag_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) struct dm_crypt_request *dmreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) u8 *iv, *org_iv, *tag_iv, *tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) __le64 *sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) int r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) BUG_ON(cc->integrity_iv_size && cc->integrity_iv_size != cc->iv_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) /* Reject unexpected unaligned bio. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) dmreq = dmreq_of_req(cc, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) dmreq->iv_sector = ctx->cc_sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) dmreq->iv_sector >>= cc->sector_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) dmreq->ctx = ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) *org_tag_of_dmreq(cc, dmreq) = tag_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) sector = org_sector_of_dmreq(cc, dmreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) *sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) iv = iv_of_dmreq(cc, dmreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) org_iv = org_iv_of_dmreq(cc, dmreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) tag = tag_from_dmreq(cc, dmreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) tag_iv = iv_tag_from_dmreq(cc, dmreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) /* AEAD request:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) * |----- AAD -------|------ DATA -------|-- AUTH TAG --|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) * | (authenticated) | (auth+encryption) | |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) * | sector_LE | IV | sector in/out | tag in/out |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) sg_init_table(dmreq->sg_in, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) sg_set_buf(&dmreq->sg_in[0], sector, sizeof(uint64_t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) sg_set_buf(&dmreq->sg_in[1], org_iv, cc->iv_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) sg_set_page(&dmreq->sg_in[2], bv_in.bv_page, cc->sector_size, bv_in.bv_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) sg_set_buf(&dmreq->sg_in[3], tag, cc->integrity_tag_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) sg_init_table(dmreq->sg_out, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) sg_set_buf(&dmreq->sg_out[0], sector, sizeof(uint64_t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) sg_set_buf(&dmreq->sg_out[1], org_iv, cc->iv_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) sg_set_page(&dmreq->sg_out[2], bv_out.bv_page, cc->sector_size, bv_out.bv_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) sg_set_buf(&dmreq->sg_out[3], tag, cc->integrity_tag_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) if (cc->iv_gen_ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) /* For READs use IV stored in integrity metadata */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) memcpy(org_iv, tag_iv, cc->iv_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) if (r < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) /* Store generated IV in integrity metadata */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) if (cc->integrity_iv_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) memcpy(tag_iv, org_iv, cc->iv_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) /* Working copy of IV, to be modified in crypto API */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) memcpy(iv, org_iv, cc->iv_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) aead_request_set_ad(req, sizeof(uint64_t) + cc->iv_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) if (bio_data_dir(ctx->bio_in) == WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) cc->sector_size, iv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) r = crypto_aead_encrypt(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) if (cc->integrity_tag_size + cc->integrity_iv_size != cc->on_disk_tag_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) memset(tag + cc->integrity_tag_size + cc->integrity_iv_size, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) cc->on_disk_tag_size - (cc->integrity_tag_size + cc->integrity_iv_size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) cc->sector_size + cc->integrity_tag_size, iv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) r = crypto_aead_decrypt(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) if (r == -EBADMSG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) char b[BDEVNAME_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu", bio_devname(ctx->bio_in, b),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) (unsigned long long)le64_to_cpu(*sector));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) r = cc->iv_gen_ops->post(cc, org_iv, dmreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
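/*
 * Convert one cc->sector_size block with the skcipher: build single-entry
 * scatterlists over the current input/output bio_vecs, generate the IV (or
 * load it from the integrity metadata on READs), run encrypt/decrypt and
 * advance both bio iterators by one crypt sector.
 */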
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) static int crypt_convert_block_skcipher(struct crypt_config *cc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) struct convert_context *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) struct skcipher_request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) unsigned int tag_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) struct scatterlist *sg_in, *sg_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) struct dm_crypt_request *dmreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) u8 *iv, *org_iv, *tag_iv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) __le64 *sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) int r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) /* Reject unexpected unaligned bio. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) dmreq = dmreq_of_req(cc, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) dmreq->iv_sector = ctx->cc_sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) dmreq->iv_sector >>= cc->sector_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) dmreq->ctx = ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) *org_tag_of_dmreq(cc, dmreq) = tag_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) iv = iv_of_dmreq(cc, dmreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) org_iv = org_iv_of_dmreq(cc, dmreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) tag_iv = iv_tag_from_dmreq(cc, dmreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) sector = org_sector_of_dmreq(cc, dmreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) *sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) /* For skcipher we use only the first sg item */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) sg_in = &dmreq->sg_in[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) sg_out = &dmreq->sg_out[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) sg_init_table(sg_in, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) sg_set_page(sg_in, bv_in.bv_page, cc->sector_size, bv_in.bv_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) sg_init_table(sg_out, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) sg_set_page(sg_out, bv_out.bv_page, cc->sector_size, bv_out.bv_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) if (cc->iv_gen_ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) /* For READs use IV stored in integrity metadata */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) memcpy(org_iv, tag_iv, cc->integrity_iv_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) if (r < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) /* Data may already have been preprocessed by the generator */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) if (test_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) sg_in = sg_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) /* Store generated IV in integrity metadata */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) if (cc->integrity_iv_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) memcpy(tag_iv, org_iv, cc->integrity_iv_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) /* Working copy of IV, to be modified in crypto API */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) memcpy(iv, org_iv, cc->iv_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) skcipher_request_set_crypt(req, sg_in, sg_out, cc->sector_size, iv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) if (bio_data_dir(ctx->bio_in) == WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) r = crypto_skcipher_encrypt(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) r = crypto_skcipher_decrypt(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) r = cc->iv_gen_ops->post(cc, org_iv, dmreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) static void kcryptd_async_done(struct crypto_async_request *async_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) int error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)
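/*
 * The crypto request for a sector is allocated lazily from cc->req_pool
 * (GFP_ATOMIC when called from interrupt context).  With multiple keys
 * (cc->tfms_count > 1) the tfm is selected by the low bits of the sector
 * number, so adjacent sectors may be processed with different keys.
 */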
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) static int crypt_alloc_req_skcipher(struct crypt_config *cc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) struct convert_context *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) if (!ctx->r.req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) ctx->r.req = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) if (!ctx->r.req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) skcipher_request_set_tfm(ctx->r.req, cc->cipher_tfm.tfms[key_index]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) * requests if the driver's request queue is full.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) skcipher_request_set_callback(ctx->r.req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) CRYPTO_TFM_REQ_MAY_BACKLOG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) static int crypt_alloc_req_aead(struct crypt_config *cc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) struct convert_context *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) if (!ctx->r.req_aead) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) ctx->r.req_aead = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) if (!ctx->r.req_aead)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) aead_request_set_tfm(ctx->r.req_aead, cc->cipher_tfm.tfms_aead[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) * requests if the driver's request queue is full.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) aead_request_set_callback(ctx->r.req_aead,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) CRYPTO_TFM_REQ_MAY_BACKLOG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) static int crypt_alloc_req(struct crypt_config *cc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) struct convert_context *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) if (crypt_integrity_aead(cc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) return crypt_alloc_req_aead(cc, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) return crypt_alloc_req_skcipher(cc, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)
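/*
 * A request embedded in the per-bio data (right after struct dm_crypt_io)
 * was not taken from the mempool, so only free requests that really came
 * from cc->req_pool.
 */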
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) static void crypt_free_req_skcipher(struct crypt_config *cc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) struct skcipher_request *req, struct bio *base_bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) if ((struct skcipher_request *)(io + 1) != req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) mempool_free(req, &cc->req_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) static void crypt_free_req_aead(struct crypt_config *cc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) struct aead_request *req, struct bio *base_bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) if ((struct aead_request *)(io + 1) != req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) mempool_free(req, &cc->req_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) if (crypt_integrity_aead(cc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) crypt_free_req_aead(cc, req, base_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) crypt_free_req_skcipher(cc, req, base_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) * Encrypt / decrypt data from one bio to another one (can be the same one)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) */
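/*
 * Returns 0 once every block has been processed or queued,
 * BLK_STS_DEV_RESOURCE when the conversion must be resumed from a workqueue
 * (request allocation failed, or a crypto driver backlog cannot be waited
 * for in interrupt context), BLK_STS_PROTECTION on an integrity mismatch
 * and BLK_STS_IOERR on any other crypto error.
 */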
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) static blk_status_t crypt_convert(struct crypt_config *cc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) struct convert_context *ctx, bool atomic, bool reset_pending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) unsigned int tag_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) * If reset_pending is set we are dealing with the bio for the first time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) * otherwise we are continuing to work on the previous bio, so do not touch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) * the cc_pending counter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) if (reset_pending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) atomic_set(&ctx->cc_pending, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) r = crypt_alloc_req(cc, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) complete(&ctx->restart);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) return BLK_STS_DEV_RESOURCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) atomic_inc(&ctx->cc_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) if (crypt_integrity_aead(cc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, tag_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, tag_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) switch (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) * The request was queued by a crypto driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) * but the driver request queue is full, let's wait.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) case -EBUSY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) if (in_interrupt()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) if (try_wait_for_completion(&ctx->restart)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) * we don't have to block to wait for completion,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) * so proceed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) * we can't wait for completion without blocking, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) * exit and continue processing in a workqueue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) ctx->r.req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) ctx->cc_sector += sector_step;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) tag_offset++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) return BLK_STS_DEV_RESOURCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) wait_for_completion(&ctx->restart);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) reinit_completion(&ctx->restart);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) * The request is queued and processed asynchronously,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) * completion function kcryptd_async_done() will be called.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) case -EINPROGRESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) ctx->r.req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) ctx->cc_sector += sector_step;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) tag_offset++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) * The request was already processed (synchronously).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) atomic_dec(&ctx->cc_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) ctx->cc_sector += sector_step;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) tag_offset++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) if (!atomic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) * There was a data integrity error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) case -EBADMSG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) atomic_dec(&ctx->cc_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) return BLK_STS_PROTECTION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) * There was an error while processing the request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) atomic_dec(&ctx->cc_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) return BLK_STS_IOERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) * Generate a new unfragmented bio with the given size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) * This should never violate the device limitations (but only because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) * max_segment_size is being constrained to PAGE_SIZE).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) *
 * This function may be called concurrently. If we allocate from the mempool
 * concurrently, there is a possibility of deadlock. For example, if we have a
 * mempool of 256 pages and two processes, each wanting 256 pages, allocate
 * from the mempool concurrently, they may deadlock in a situation where both
 * processes have allocated 128 pages and the mempool is exhausted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) * In order to avoid this scenario we allocate the pages under a mutex.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) *
 * In order to not degrade performance with excessive locking, we try
 * non-blocking allocations without a mutex first; on failure we fall back
 * to blocking allocations with a mutex.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) struct crypt_config *cc = io->cc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) struct bio *clone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) unsigned i, len, remaining_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) mutex_lock(&cc->bio_alloc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, &cc->bs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) if (!clone)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) clone_init(io, clone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) remaining_size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) for (i = 0; i < nr_iovecs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) page = mempool_alloc(&cc->page_pool, gfp_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) if (!page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) crypt_free_buffer_pages(cc, clone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) bio_put(clone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) gfp_mask |= __GFP_DIRECT_RECLAIM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) bio_add_page(clone, page, len, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) remaining_size -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) /* Allocate space for integrity tags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) if (dm_crypt_integrity_io_alloc(io, clone)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) crypt_free_buffer_pages(cc, clone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) bio_put(clone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) clone = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) mutex_unlock(&cc->bio_alloc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) return clone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) struct bio_vec *bv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) struct bvec_iter_all iter_all;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) bio_for_each_segment_all(bv, clone, iter_all) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) BUG_ON(!bv->bv_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) mempool_free(bv->bv_page, &cc->page_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714)
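/*
 * Set up the per-request dm_crypt_io state before any processing or
 * reference counting takes place.
 */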
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) struct bio *bio, sector_t sector)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) io->cc = cc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) io->base_bio = bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) io->sector = sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) io->error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) io->ctx.r.req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) io->integrity_metadata = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) io->integrity_metadata_from_pool = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) atomic_set(&io->io_pending, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) static void crypt_inc_pending(struct dm_crypt_io *io)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) atomic_inc(&io->io_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) static void kcryptd_io_bio_endio(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) bio_endio(io->base_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) * One of the bios was finished. Check for completion of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) * the whole request and correctly clean up the buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) static void crypt_dec_pending(struct dm_crypt_io *io)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) struct crypt_config *cc = io->cc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) struct bio *base_bio = io->base_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) blk_status_t error = io->error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) if (!atomic_dec_and_test(&io->io_pending))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) if (io->ctx.r.req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) crypt_free_req(cc, io->ctx.r.req, base_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) if (unlikely(io->integrity_metadata_from_pool))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) mempool_free(io->integrity_metadata, &io->cc->tag_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) kfree(io->integrity_metadata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) base_bio->bi_status = error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) * If we are running this function from our tasklet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) * we can't call bio_endio() here, because it will call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) * clone_endio() from dm.c, which in turn will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) * free the current struct dm_crypt_io structure with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) * our tasklet. In this case we need to delay bio_endio()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) * execution to after the tasklet is done and dequeued.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) if (tasklet_trylock(&io->tasklet)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) tasklet_unlock(&io->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) bio_endio(base_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) INIT_WORK(&io->work, kcryptd_io_bio_endio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) queue_work(cc->io_queue, &io->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) * kcryptd/kcryptd_io:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) * Needed because it would be very unwise to do decryption in an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) * interrupt context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) * kcryptd performs the actual encryption or decryption.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) * kcryptd_io performs the IO submission.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) * They must be separated as otherwise the final stages could be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) * starved by new requests which can block in the first stages due
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) * to memory allocation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) *
 * The work is performed per-CPU, globally for all dm-crypt instances;
 * the work items should not depend on each other and must not block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) */
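/*
 * Completion handler for the clone bio submitted to the underlying device:
 * writes have their encrypted pages freed, successful reads are queued for
 * decryption, and any error is propagated to the base bio.
 */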
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) static void crypt_endio(struct bio *clone)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) struct dm_crypt_io *io = clone->bi_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) struct crypt_config *cc = io->cc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) unsigned rw = bio_data_dir(clone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) blk_status_t error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) * free the processed pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) if (rw == WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) crypt_free_buffer_pages(cc, clone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) error = clone->bi_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) bio_put(clone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) if (rw == READ && !error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) kcryptd_queue_crypt(io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) if (unlikely(error))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) io->error = error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) crypt_dec_pending(io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823)
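/*
 * Point the clone bio at the underlying device and hook up crypt_endio(),
 * copying the operation flags from the original bio.
 */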
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) static void clone_init(struct dm_crypt_io *io, struct bio *clone)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) struct crypt_config *cc = io->cc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) clone->bi_private = io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) clone->bi_end_io = crypt_endio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) bio_set_dev(clone, cc->dev->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) clone->bi_opf = io->base_bio->bi_opf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) struct crypt_config *cc = io->cc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) struct bio *clone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) * We need the original biovec array in order to decrypt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) * the whole bio data *afterwards* -- thanks to immutable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) * biovecs we don't need to worry about the block layer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) * modifying the biovec array; so leverage bio_clone_fast().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) clone = bio_clone_fast(io->base_bio, gfp, &cc->bs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) if (!clone)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) crypt_inc_pending(io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) clone_init(io, clone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) clone->bi_iter.bi_sector = cc->start + io->sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) if (dm_crypt_integrity_io_alloc(io, clone)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) crypt_dec_pending(io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) bio_put(clone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) submit_bio_noacct(clone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) static void kcryptd_io_read_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) crypt_inc_pending(io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) if (kcryptd_io_read(io, GFP_NOIO))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) io->error = BLK_STS_RESOURCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) crypt_dec_pending(io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) static void kcryptd_queue_read(struct dm_crypt_io *io)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) struct crypt_config *cc = io->cc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) INIT_WORK(&io->work, kcryptd_io_read_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) queue_work(cc->io_queue, &io->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881)
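/* Submit the already-encrypted clone bio to the underlying device. */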
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) static void kcryptd_io_write(struct dm_crypt_io *io)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) struct bio *clone = io->ctx.bio_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) submit_bio_noacct(clone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) #define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890)
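/*
 * Dedicated write thread: repeatedly takes ownership of the rb-tree of
 * encrypted write bios (kept sorted by sector) and submits them in sector
 * order under a block plug, then sleeps until new bios are queued.
 */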
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) static int dmcrypt_write(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) struct crypt_config *cc = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) struct dm_crypt_io *io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) struct rb_root write_tree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) struct blk_plug plug;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) spin_lock_irq(&cc->write_thread_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) continue_locked:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) if (!RB_EMPTY_ROOT(&cc->write_tree))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) goto pop_from_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) set_current_state(TASK_INTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) spin_unlock_irq(&cc->write_thread_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) if (unlikely(kthread_should_stop())) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) set_current_state(TASK_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) set_current_state(TASK_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) spin_lock_irq(&cc->write_thread_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) goto continue_locked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) pop_from_list:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) write_tree = cc->write_tree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) cc->write_tree = RB_ROOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) spin_unlock_irq(&cc->write_thread_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) BUG_ON(rb_parent(write_tree.rb_node));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) * Note: we cannot walk the tree here with rb_next because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) * the structures may be freed when kcryptd_io_write is called.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) blk_start_plug(&plug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) io = crypt_io_from_node(rb_first(&write_tree));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) rb_erase(&io->rb_node, &write_tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) kcryptd_io_write(io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) } while (!RB_EMPTY_ROOT(&write_tree));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) blk_finish_plug(&plug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942)
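/*
 * Hand off an encrypted write bio for submission: either submit it directly
 * (when offloading to the write thread is disabled or the write must be
 * issued inline), or insert it into the sector-sorted rb-tree and wake the
 * dmcrypt_write thread.
 */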
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) struct bio *clone = io->ctx.bio_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) struct crypt_config *cc = io->cc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) sector_t sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) struct rb_node **rbp, *parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) if (unlikely(io->error)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) crypt_free_buffer_pages(cc, clone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) bio_put(clone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) crypt_dec_pending(io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) /* crypt_convert should have filled the clone bio */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) BUG_ON(io->ctx.iter_out.bi_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) clone->bi_iter.bi_sector = cc->start + io->sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) if ((likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) submit_bio_noacct(clone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) spin_lock_irqsave(&cc->write_thread_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) if (RB_EMPTY_ROOT(&cc->write_tree))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) wake_up_process(cc->write_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) rbp = &cc->write_tree.rb_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) parent = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) sector = io->sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) while (*rbp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) parent = *rbp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) if (sector < crypt_io_from_node(parent)->sector)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) rbp = &(*rbp)->rb_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) rbp = &(*rbp)->rb_right;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) rb_link_node(&io->rb_node, parent, rbp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) rb_insert_color(&io->rb_node, &cc->write_tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) spin_unlock_irqrestore(&cc->write_thread_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) static bool kcryptd_crypt_write_inline(struct crypt_config *cc,
				       struct convert_context *ctx)
{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) if (!test_bit(DM_CRYPT_WRITE_INLINE, &cc->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) * Note: zone append writes (REQ_OP_ZONE_APPEND) do not have ordering
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) * constraints so they do not need to be issued inline by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) * kcryptd_crypt_write_convert().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) switch (bio_op(ctx->bio_in)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) case REQ_OP_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) case REQ_OP_WRITE_SAME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) case REQ_OP_WRITE_ZEROES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008)
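/*
 * Workqueue continuation of a write conversion: crypt_convert() returned
 * BLK_STS_DEV_RESOURCE because the Crypto API backlogged a request while we
 * could not sleep, so wait for the backlog to drain and finish the
 * conversion from process context.
 */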
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) static void kcryptd_crypt_write_continue(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) struct crypt_config *cc = io->cc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) struct convert_context *ctx = &io->ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) int crypt_finished;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) sector_t sector = io->sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) blk_status_t r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) wait_for_completion(&ctx->restart);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) reinit_completion(&ctx->restart);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) r = crypt_convert(cc, &io->ctx, true, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) io->error = r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) /* Wait for completion signaled by kcryptd_async_done() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) wait_for_completion(&ctx->restart);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) crypt_finished = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) /* Encryption was already finished, submit io now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) if (crypt_finished) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) kcryptd_crypt_write_io_submit(io, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) io->sector = sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) crypt_dec_pending(io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039)
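/*
 * Encrypt a write bio: allocate a bio with fresh pages for the ciphertext,
 * run the conversion (continuing in a workqueue if the Crypto API backlogs
 * a request), and submit the encrypted clone once all blocks are done.
 */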
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) struct crypt_config *cc = io->cc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) struct convert_context *ctx = &io->ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) struct bio *clone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) int crypt_finished;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) sector_t sector = io->sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) blk_status_t r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) * Prevent io from disappearing until this function completes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) crypt_inc_pending(io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) crypt_convert_init(cc, ctx, NULL, io->base_bio, sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) if (unlikely(!clone)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) io->error = BLK_STS_IOERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) goto dec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) io->ctx.bio_out = clone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) io->ctx.iter_out = clone->bi_iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) sector += bio_sectors(clone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) crypt_inc_pending(io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) r = crypt_convert(cc, ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags), true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) * Crypto API backlogged the request, because its queue was full
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) * and we're in softirq context, so continue from a workqueue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) * (TODO: is it actually possible to be in softirq in the write path?)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) if (r == BLK_STS_DEV_RESOURCE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) INIT_WORK(&io->work, kcryptd_crypt_write_continue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) queue_work(cc->crypt_queue, &io->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) io->error = r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) /* Wait for completion signaled by kcryptd_async_done() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) wait_for_completion(&ctx->restart);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) crypt_finished = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) /* Encryption was already finished, submit io now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) if (crypt_finished) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) kcryptd_crypt_write_io_submit(io, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) io->sector = sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) dec:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) crypt_dec_pending(io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) crypt_dec_pending(io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102)
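/*
 * Workqueue continuation of a read conversion, used when the Crypto API
 * backlogged a request while the conversion ran in softirq context.
 */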
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) static void kcryptd_crypt_read_continue(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) struct crypt_config *cc = io->cc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) blk_status_t r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) wait_for_completion(&io->ctx.restart);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) reinit_completion(&io->ctx.restart);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) r = crypt_convert(cc, &io->ctx, true, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) io->error = r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) if (atomic_dec_and_test(&io->ctx.cc_pending))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) kcryptd_crypt_read_done(io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) crypt_dec_pending(io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121)
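/*
 * Decrypt a read bio in place: the clone has already been read from the
 * underlying device, so bio_in and bio_out both point at the base bio.
 */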
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) struct crypt_config *cc = io->cc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) blk_status_t r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) crypt_inc_pending(io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) io->sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) r = crypt_convert(cc, &io->ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) * Crypto API backlogged the request, because its queue was full
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) * and we're in softirq context, so continue from a workqueue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) if (r == BLK_STS_DEV_RESOURCE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) INIT_WORK(&io->work, kcryptd_crypt_read_continue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) queue_work(cc->crypt_queue, &io->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) io->error = r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) if (atomic_dec_and_test(&io->ctx.cc_pending))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) kcryptd_crypt_read_done(io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) crypt_dec_pending(io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151)
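/*
 * Crypto API completion callback for asynchronous skcipher/AEAD requests.
 * Handles backlog notifications, post-processes the IV, records errors and,
 * once the last pending block completes, finishes the read or submits the
 * write.
 */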
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) static void kcryptd_async_done(struct crypto_async_request *async_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) int error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) struct dm_crypt_request *dmreq = async_req->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) struct convert_context *ctx = dmreq->ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) struct crypt_config *cc = io->cc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) * A request from crypto driver backlog is going to be processed now,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) * finish the completion and continue in crypt_convert().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) * (Callback will be called for the second time for this request.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) if (error == -EINPROGRESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) complete(&ctx->restart);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) error = cc->iv_gen_ops->post(cc, org_iv_of_dmreq(cc, dmreq), dmreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) if (error == -EBADMSG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) char b[BDEVNAME_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu", bio_devname(ctx->bio_in, b),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) (unsigned long long)le64_to_cpu(*org_sector_of_dmreq(cc, dmreq)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) io->error = BLK_STS_PROTECTION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) } else if (error < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) io->error = BLK_STS_IOERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) if (!atomic_dec_and_test(&ctx->cc_pending))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) * The request is fully completed: for inline writes, let
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) * kcryptd_crypt_write_convert() do the IO submission.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) if (bio_data_dir(io->base_bio) == READ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) kcryptd_crypt_read_done(io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) if (kcryptd_crypt_write_inline(cc, ctx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) complete(&ctx->restart);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) kcryptd_crypt_write_io_submit(io, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) static void kcryptd_crypt(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) if (bio_data_dir(io->base_bio) == READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) kcryptd_crypt_read_convert(io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) kcryptd_crypt_write_convert(io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) static void kcryptd_crypt_tasklet(unsigned long work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) kcryptd_crypt((struct work_struct *)work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217)
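/*
 * Dispatch the conversion: with the "no workqueue" flag set for this bio's
 * direction it runs inline (via a tasklet when hard IRQs or interrupts are
 * disabled), otherwise it is queued on cc->crypt_queue.
 */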
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) static void kcryptd_queue_crypt(struct dm_crypt_io *io)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) struct crypt_config *cc = io->cc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) if ((bio_data_dir(io->base_bio) == READ && test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) (bio_data_dir(io->base_bio) == WRITE && test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))) {
		/*
		 * in_irq(): the Crypto API's skcipher_walk_first() refuses to run in hard IRQ context.
		 * irqs_disabled(): the kernel may run some IO completions from the idle thread, and
		 * those run with interrupts disabled.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) if (in_irq() || irqs_disabled()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) tasklet_schedule(&io->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) kcryptd_crypt(&io->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) INIT_WORK(&io->work, kcryptd_crypt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) queue_work(cc->crypt_queue, &io->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242)
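/*
 * Free the single AEAD transform (AEAD mode allocates only tfms_aead[0])
 * together with the array holding it.
 */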
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) static void crypt_free_tfms_aead(struct crypt_config *cc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) if (!cc->cipher_tfm.tfms_aead)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) if (cc->cipher_tfm.tfms_aead[0] && !IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) crypto_free_aead(cc->cipher_tfm.tfms_aead[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) cc->cipher_tfm.tfms_aead[0] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) kfree(cc->cipher_tfm.tfms_aead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) cc->cipher_tfm.tfms_aead = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) static void crypt_free_tfms_skcipher(struct crypt_config *cc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) if (!cc->cipher_tfm.tfms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) for (i = 0; i < cc->tfms_count; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) if (cc->cipher_tfm.tfms[i] && !IS_ERR(cc->cipher_tfm.tfms[i])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) crypto_free_skcipher(cc->cipher_tfm.tfms[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) cc->cipher_tfm.tfms[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) kfree(cc->cipher_tfm.tfms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) cc->cipher_tfm.tfms = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) static void crypt_free_tfms(struct crypt_config *cc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) if (crypt_integrity_aead(cc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) crypt_free_tfms_aead(cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) crypt_free_tfms_skcipher(cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281)
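/*
 * Allocate cc->tfms_count skcipher transforms for the given cipher mode;
 * more than one transform is used when the mapping specifies a key count
 * (cc->tfms_count > 1).
 */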
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) static int crypt_alloc_tfms_skcipher(struct crypt_config *cc, char *ciphermode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) cc->cipher_tfm.tfms = kcalloc(cc->tfms_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) sizeof(struct crypto_skcipher *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) if (!cc->cipher_tfm.tfms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) for (i = 0; i < cc->tfms_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) cc->cipher_tfm.tfms[i] = crypto_alloc_skcipher(ciphermode, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) CRYPTO_ALG_ALLOCATES_MEMORY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) if (IS_ERR(cc->cipher_tfm.tfms[i])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) err = PTR_ERR(cc->cipher_tfm.tfms[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) crypt_free_tfms(cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) * dm-crypt performance can vary greatly depending on which crypto
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) * algorithm implementation is used. Help people debug performance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) * problems by logging the ->cra_driver_name.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) crypto_skcipher_alg(any_tfm(cc))->base.cra_driver_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) static int crypt_alloc_tfms_aead(struct crypt_config *cc, char *ciphermode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316)
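	/*
	 * Note: tfms and tfms_aead are assumed to share a union in
	 * struct crypt_config, so the single-element array can be
	 * allocated through either member.
	 */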
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) cc->cipher_tfm.tfms = kmalloc(sizeof(struct crypto_aead *), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) if (!cc->cipher_tfm.tfms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) cc->cipher_tfm.tfms_aead[0] = crypto_alloc_aead(ciphermode, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) CRYPTO_ALG_ALLOCATES_MEMORY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) if (IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) err = PTR_ERR(cc->cipher_tfm.tfms_aead[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) crypt_free_tfms(cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) crypto_aead_alg(any_tfm_aead(cc))->base.cra_driver_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) if (crypt_integrity_aead(cc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) return crypt_alloc_tfms_aead(cc, ciphermode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) return crypt_alloc_tfms_skcipher(cc, ciphermode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341)
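/*
 * Size of the key material handed to each transform: the extra key material
 * (used for the IV and similar) is excluded and the rest is split evenly
 * between the transforms. For example, a 128-byte key with key_extra_size 0
 * and tfms_count 4 yields 32-byte subkeys (128 >> ilog2(4)).
 */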
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) static unsigned crypt_subkey_size(struct crypt_config *cc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) return (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) static unsigned crypt_authenckey_size(struct crypt_config *cc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) return crypt_subkey_size(cc) + RTA_SPACE(sizeof(struct crypto_authenc_key_param));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351)
/*
 * If the AEAD is composed like authenc(hmac(sha256),xts(aes)),
 * the key must be supplied in the special rtattr-encoded format
 * expected by the authenc template.
 * This function converts cc->key to that format.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) static void crypt_copy_authenckey(char *p, const void *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) unsigned enckeylen, unsigned authkeylen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) struct crypto_authenc_key_param *param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) struct rtattr *rta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) rta = (struct rtattr *)p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) param = RTA_DATA(rta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) param->enckeylen = cpu_to_be32(enckeylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) rta->rta_len = RTA_LENGTH(sizeof(*param));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) p += RTA_SPACE(sizeof(*param));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) memcpy(p, key + enckeylen, authkeylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) p += authkeylen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) memcpy(p, key, enckeylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373)
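/*
 * Program the current cc->key into every allocated transform, giving each
 * transform its own subkey slice; authenticated (hmac) modes first re-pack
 * the key into the authenc format.
 */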
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) static int crypt_setkey(struct crypt_config *cc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) unsigned subkey_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) int err = 0, i, r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) /* Ignore extra keys (which are used for IV etc) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) subkey_size = crypt_subkey_size(cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) if (crypt_integrity_hmac(cc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) if (subkey_size < cc->key_mac_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) crypt_copy_authenckey(cc->authenc_key, cc->key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) subkey_size - cc->key_mac_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) cc->key_mac_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) for (i = 0; i < cc->tfms_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) if (crypt_integrity_hmac(cc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) cc->authenc_key, crypt_authenckey_size(cc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) else if (crypt_integrity_aead(cc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) cc->key + (i * subkey_size),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) subkey_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) r = crypto_skcipher_setkey(cc->cipher_tfm.tfms[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) cc->key + (i * subkey_size),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) subkey_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) err = r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) if (crypt_integrity_hmac(cc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) memzero_explicit(cc->authenc_key, crypt_authenckey_size(cc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) #ifdef CONFIG_KEYS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) static bool contains_whitespace(const char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) while (*str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) if (isspace(*str++))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) static int set_key_user(struct crypt_config *cc, struct key *key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) const struct user_key_payload *ukp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) ukp = user_key_payload_locked(key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) if (!ukp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) return -EKEYREVOKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) if (cc->key_size != ukp->datalen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) memcpy(cc->key, ukp->data, cc->key_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) #if defined(CONFIG_ENCRYPTED_KEYS) || defined(CONFIG_ENCRYPTED_KEYS_MODULE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) static int set_key_encrypted(struct crypt_config *cc, struct key *key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) const struct encrypted_key_payload *ekp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) ekp = key->payload.data[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) if (!ekp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) return -EKEYREVOKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) if (cc->key_size != ekp->decrypted_datalen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) memcpy(cc->key, ekp->decrypted_data, cc->key_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) #endif /* CONFIG_ENCRYPTED_KEYS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456)
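/*
 * key_string here has the form <key_type>:<key_description>, where key_type
 * is "logon", "user" or (when supported) "encrypted"; for example the
 * illustrative string "logon:my_prefix:my_key".
 */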
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) char *new_key_string, *key_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) struct key_type *type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) struct key *key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) int (*set_key)(struct crypt_config *cc, struct key *key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) * Reject key_string with whitespace. dm core currently lacks code for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) * proper whitespace escaping in arguments on DM_TABLE_STATUS path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) if (contains_whitespace(key_string)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) DMERR("whitespace chars not allowed in key string");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) /* look for next ':' separating key_type from key_description */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) key_desc = strpbrk(key_string, ":");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) if (!key_desc || key_desc == key_string || !strlen(key_desc + 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) if (!strncmp(key_string, "logon:", key_desc - key_string + 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) type = &key_type_logon;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) set_key = set_key_user;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) } else if (!strncmp(key_string, "user:", key_desc - key_string + 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) type = &key_type_user;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) set_key = set_key_user;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) #if defined(CONFIG_ENCRYPTED_KEYS) || defined(CONFIG_ENCRYPTED_KEYS_MODULE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) } else if (!strncmp(key_string, "encrypted:", key_desc - key_string + 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) type = &key_type_encrypted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) set_key = set_key_encrypted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) new_key_string = kstrdup(key_string, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) if (!new_key_string)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) key = request_key(type, key_desc + 1, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) if (IS_ERR(key)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) kfree_sensitive(new_key_string);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) return PTR_ERR(key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) down_read(&key->sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) ret = set_key(cc, key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) up_read(&key->sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) key_put(key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) kfree_sensitive(new_key_string);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) up_read(&key->sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) key_put(key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) /* clear the flag since the following operations may invalidate a previously valid key */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) ret = crypt_setkey(cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) kfree_sensitive(cc->key_string);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) cc->key_string = new_key_string;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) kfree_sensitive(new_key_string);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) static int get_key_size(char **key_string)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) char *colon, dummy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) if (*key_string[0] != ':')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) return strlen(*key_string) >> 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) /* look for next ':' in key string */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) colon = strpbrk(*key_string + 1, ":");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) if (!colon)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) if (sscanf(*key_string + 1, "%u%c", &ret, &dummy) != 2 || dummy != ':')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) *key_string = colon;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) /* remaining key string should be :<logon|user|encrypted>:<key_desc> */
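	/*
	 * e.g. an illustrative key argument ":32:logon:my_prefix:my_key"
	 * leaves ":logon:my_prefix:my_key" here
	 */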
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) static int get_key_size(char **key_string)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) return (*key_string[0] == ':') ? -EINVAL : (int)(strlen(*key_string) >> 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) #endif /* CONFIG_KEYS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) static int crypt_set_key(struct crypt_config *cc, char *key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) int r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) int key_string_len = strlen(key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) /* Hyphen (which gives a key_size of zero) means there is no key. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) if (!cc->key_size && strcmp(key, "-"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) /* ':' means the key is in kernel keyring, short-circuit normal key processing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) if (key[0] == ':') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) r = crypt_set_keyring_key(cc, key + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) /* clear the flag since the following operations may invalidate a previously valid key */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) /* wipe references to any kernel keyring key */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) kfree_sensitive(cc->key_string);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) cc->key_string = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) /* Decode key from its hex representation. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) if (cc->key_size && hex2bin(cc->key, key, cc->key_size) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) r = crypt_setkey(cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) if (!r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) /* Hex key string not needed after here, so wipe it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) memset(key, '0', key_string_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) static int crypt_wipe_key(struct crypt_config *cc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) get_random_bytes(&cc->key, cc->key_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) /* Wipe IV private keys */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) r = cc->iv_gen_ops->wipe(cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) kfree_sensitive(cc->key_string);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) cc->key_string = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) r = crypt_setkey(cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) memset(&cc->key, 0, cc->key_size * sizeof(u8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627)
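/*
 * Give each dm-crypt client its share of DM_CRYPT_MEMORY_PERCENT of low
 * memory (split among all clients), but never less than
 * DM_CRYPT_MIN_PAGES_PER_CLIENT pages.
 */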
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) static void crypt_calculate_pages_per_client(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) unsigned long pages = (totalram_pages() - totalhigh_pages()) * DM_CRYPT_MEMORY_PERCENT / 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) if (!dm_crypt_clients_n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) pages /= dm_crypt_clients_n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) if (pages < DM_CRYPT_MIN_PAGES_PER_CLIENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) pages = DM_CRYPT_MIN_PAGES_PER_CLIENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) dm_crypt_pages_per_client = pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) static void *crypt_page_alloc(gfp_t gfp_mask, void *pool_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) struct crypt_config *cc = pool_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) * Note, percpu_counter_read_positive() may over (and under) estimate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) * the current usage by at most (batch - 1) * num_online_cpus() pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) * but avoids potential spinlock contention of an exact result.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) if (unlikely(percpu_counter_read_positive(&cc->n_allocated_pages) >= dm_crypt_pages_per_client) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) likely(gfp_mask & __GFP_NORETRY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) page = alloc_page(gfp_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) if (likely(page != NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) percpu_counter_add(&cc->n_allocated_pages, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) return page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) static void crypt_page_free(void *page, void *pool_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) struct crypt_config *cc = pool_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) __free_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) percpu_counter_sub(&cc->n_allocated_pages, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) static void crypt_dtr(struct dm_target *ti)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) struct crypt_config *cc = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) ti->private = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) if (!cc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) if (cc->write_thread)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) kthread_stop(cc->write_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) if (cc->io_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) destroy_workqueue(cc->io_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) if (cc->crypt_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) destroy_workqueue(cc->crypt_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) crypt_free_tfms(cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) bioset_exit(&cc->bs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) mempool_exit(&cc->page_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) mempool_exit(&cc->req_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) mempool_exit(&cc->tag_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) WARN_ON(percpu_counter_sum(&cc->n_allocated_pages) != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) percpu_counter_destroy(&cc->n_allocated_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) cc->iv_gen_ops->dtr(cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) if (cc->dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) dm_put_device(ti, cc->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) kfree_sensitive(cc->cipher_string);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) kfree_sensitive(cc->key_string);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) kfree_sensitive(cc->cipher_auth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) kfree_sensitive(cc->authenc_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) mutex_destroy(&cc->bio_alloc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) /* Must zero key material before freeing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) kfree_sensitive(cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) spin_lock(&dm_crypt_clients_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) WARN_ON(!dm_crypt_clients_n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) dm_crypt_clients_n--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) crypt_calculate_pages_per_client();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) spin_unlock(&dm_crypt_clients_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) static int crypt_ctr_ivmode(struct dm_target *ti, const char *ivmode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) struct crypt_config *cc = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) if (crypt_integrity_aead(cc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) if (cc->iv_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) /* at least a 64 bit sector number should fit in our buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) cc->iv_size = max(cc->iv_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) (unsigned int)(sizeof(u64) / sizeof(u8)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) else if (ivmode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) DMWARN("Selected cipher does not support IVs");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) ivmode = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) /* Choose ivmode, see comments at iv code. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) if (ivmode == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) cc->iv_gen_ops = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) else if (strcmp(ivmode, "plain") == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) cc->iv_gen_ops = &crypt_iv_plain_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) else if (strcmp(ivmode, "plain64") == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) cc->iv_gen_ops = &crypt_iv_plain64_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) else if (strcmp(ivmode, "plain64be") == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) cc->iv_gen_ops = &crypt_iv_plain64be_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) else if (strcmp(ivmode, "essiv") == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) cc->iv_gen_ops = &crypt_iv_essiv_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) else if (strcmp(ivmode, "benbi") == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) cc->iv_gen_ops = &crypt_iv_benbi_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) else if (strcmp(ivmode, "null") == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) cc->iv_gen_ops = &crypt_iv_null_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) else if (strcmp(ivmode, "eboiv") == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) cc->iv_gen_ops = &crypt_iv_eboiv_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) else if (strcmp(ivmode, "elephant") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) cc->iv_gen_ops = &crypt_iv_elephant_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) cc->key_parts = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) cc->key_extra_size = cc->key_size / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) if (cc->key_extra_size > ELEPHANT_MAX_KEY_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) set_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) } else if (strcmp(ivmode, "lmk") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) cc->iv_gen_ops = &crypt_iv_lmk_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) * Versions 2 and 3 are recognised according
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) * to the length of the provided multi-key string.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) * If present (version 3), the last key is used as IV seed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) * All keys (including IV seed) are always the same size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) if (cc->key_size % cc->key_parts) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) cc->key_parts++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) cc->key_extra_size = cc->key_size / cc->key_parts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) } else if (strcmp(ivmode, "tcw") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) cc->iv_gen_ops = &crypt_iv_tcw_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) cc->key_parts += 2; /* IV + whitening */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) } else if (strcmp(ivmode, "random") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) cc->iv_gen_ops = &crypt_iv_random_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) /* Need storage space in integrity fields. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) cc->integrity_iv_size = cc->iv_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) ti->error = "Invalid IV mode";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) * Workaround to parse HMAC algorithm from AEAD crypto API spec.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) * The HMAC is needed to calculate tag size (HMAC digest size).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) * This should probably be done by crypto API calls (once available...)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) */
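/*
 * For example, for cipher_api "authenc(hmac(sha256),cbc(aes))" the code below
 * extracts mac_alg "hmac(sha256)"; key_mac_size is then the HMAC digest size
 * (32 bytes for SHA-256).
 */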
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) static int crypt_ctr_auth_cipher(struct crypt_config *cc, char *cipher_api)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) char *start, *end, *mac_alg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) struct crypto_ahash *mac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) if (!strstarts(cipher_api, "authenc("))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) start = strchr(cipher_api, '(');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) end = strchr(cipher_api, ',');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) if (!start || !end || ++start > end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) mac_alg = kzalloc(end - start + 1, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) if (!mac_alg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) strncpy(mac_alg, start, end - start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) mac = crypto_alloc_ahash(mac_alg, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) kfree(mac_alg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) if (IS_ERR(mac))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) return PTR_ERR(mac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) cc->key_mac_size = crypto_ahash_digestsize(mac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) crypto_free_ahash(mac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) cc->authenc_key = kmalloc(crypt_authenckey_size(cc), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) if (!cc->authenc_key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) char **ivmode, char **ivopts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) struct crypt_config *cc = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) char *tmp, *cipher_api, buf[CRYPTO_MAX_ALG_NAME];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) int ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) cc->tfms_count = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) * New format (capi: prefix)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) * capi:cipher_api_spec-iv:ivopts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) */
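	/* e.g. "capi:xts(aes)-plain64" or "capi:authenc(hmac(sha256),cbc(aes))-essiv:sha256" */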
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) tmp = &cipher_in[strlen("capi:")];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) /* Separate IV options if present; the hash name in ivopts can contain another '-' */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) *ivopts = strrchr(tmp, ':');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) if (*ivopts) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) **ivopts = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) (*ivopts)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) /* Parse IV mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) *ivmode = strrchr(tmp, '-');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) if (*ivmode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) **ivmode = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) (*ivmode)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) /* The rest is crypto API spec */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) cipher_api = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) /* Allocate AEAD; it can be used only in the new format. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) if (crypt_integrity_aead(cc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) ret = crypt_ctr_auth_cipher(cc, cipher_api);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) ti->error = "Invalid AEAD cipher spec";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) if (*ivmode && !strcmp(*ivmode, "lmk"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) cc->tfms_count = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) if (*ivmode && !strcmp(*ivmode, "essiv")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) if (!*ivopts) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) ti->error = "Digest algorithm missing for ESSIV mode";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) ret = snprintf(buf, CRYPTO_MAX_ALG_NAME, "essiv(%s,%s)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) cipher_api, *ivopts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) if (ret < 0 || ret >= CRYPTO_MAX_ALG_NAME) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) ti->error = "Cannot allocate cipher string";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) cipher_api = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) cc->key_parts = cc->tfms_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) /* Allocate cipher */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) ret = crypt_alloc_tfms(cc, cipher_api);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) ti->error = "Error allocating crypto tfm";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) if (crypt_integrity_aead(cc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) char **ivmode, char **ivopts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) struct crypt_config *cc = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) char *tmp, *cipher, *chainmode, *keycount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) char *cipher_api = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) int ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) char dummy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) if (strchr(cipher_in, '(') || crypt_integrity_aead(cc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) ti->error = "Bad cipher specification";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) * Legacy dm-crypt cipher specification
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) * cipher[:keycount]-mode-iv:ivopts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) */
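	/* e.g. "aes-cbc-essiv:sha256" or "aes-xts-plain64" */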
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) tmp = cipher_in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) keycount = strsep(&tmp, "-");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) cipher = strsep(&keycount, ":");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) if (!keycount)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) cc->tfms_count = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) !is_power_of_2(cc->tfms_count)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) ti->error = "Bad cipher key count specification";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) cc->key_parts = cc->tfms_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) chainmode = strsep(&tmp, "-");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) *ivmode = strsep(&tmp, ":");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) *ivopts = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) * For compatibility with the original dm-crypt mapping format, if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) * only the cipher name is supplied, use cbc-plain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) */
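	/* e.g. plain "aes" is treated as "aes-cbc-plain", i.e. cbc(aes) with a plain IV */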
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) if (!chainmode || (!strcmp(chainmode, "plain") && !*ivmode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) chainmode = "cbc";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) *ivmode = "plain";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) if (strcmp(chainmode, "ecb") && !*ivmode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) ti->error = "IV mechanism required";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) if (!cipher_api)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) goto bad_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) if (*ivmode && !strcmp(*ivmode, "essiv")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) if (!*ivopts) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) ti->error = "Digest algorithm missing for ESSIV mode";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) kfree(cipher_api);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) "essiv(%s(%s),%s)", chainmode, cipher, *ivopts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) "%s(%s)", chainmode, cipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) if (ret < 0 || ret >= CRYPTO_MAX_ALG_NAME) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) kfree(cipher_api);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) goto bad_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) /* Allocate cipher */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) ret = crypt_alloc_tfms(cc, cipher_api);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) ti->error = "Error allocating crypto tfm";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) kfree(cipher_api);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) kfree(cipher_api);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) bad_mem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) ti->error = "Cannot allocate cipher strings";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) static int crypt_ctr_cipher(struct dm_target *ti, char *cipher_in, char *key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) struct crypt_config *cc = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) char *ivmode = NULL, *ivopts = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) if (!cc->cipher_string) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) ti->error = "Cannot allocate cipher strings";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) if (strstarts(cipher_in, "capi:"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) ret = crypt_ctr_cipher_new(ti, cipher_in, key, &ivmode, &ivopts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) ret = crypt_ctr_cipher_old(ti, cipher_in, key, &ivmode, &ivopts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) /* Initialize IV */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) ret = crypt_ctr_ivmode(ti, ivmode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) /* Initialize and set key */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) ret = crypt_set_key(cc, key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) ti->error = "Error decoding and setting key";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) /* Allocate IV */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) ti->error = "Error creating IV";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) /* Initialize IV (set keys for ESSIV etc) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) ret = cc->iv_gen_ops->init(cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) ti->error = "Error initialising IV";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) /* wipe the kernel key payload copy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) if (cc->key_string)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) memset(cc->key, 0, cc->key_size * sizeof(u8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043)
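/*
 * Parse optional feature arguments: allow_discards, same_cpu_crypt,
 * submit_from_crypt_cpus, no_read_workqueue, no_write_workqueue,
 * integrity:<bytes>:<type>, sector_size:<bytes> and iv_large_sectors.
 */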
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **argv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) struct crypt_config *cc = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) struct dm_arg_set as;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) static const struct dm_arg _args[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) {0, 8, "Invalid number of feature args"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) unsigned int opt_params, val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) const char *opt_string, *sval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) char dummy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) /* Optional parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) as.argc = argc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) as.argv = argv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) while (opt_params--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) opt_string = dm_shift_arg(&as);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) if (!opt_string) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) ti->error = "Not enough feature arguments";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) if (!strcasecmp(opt_string, "allow_discards"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) ti->num_discard_bios = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) else if (!strcasecmp(opt_string, "same_cpu_crypt"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) set_bit(DM_CRYPT_SAME_CPU, &cc->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) else if (!strcasecmp(opt_string, "submit_from_crypt_cpus"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) else if (!strcasecmp(opt_string, "no_read_workqueue"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) set_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) else if (!strcasecmp(opt_string, "no_write_workqueue"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) set_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) else if (sscanf(opt_string, "integrity:%u:", &val) == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) if (val == 0 || val > MAX_TAG_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) ti->error = "Invalid integrity arguments";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) cc->on_disk_tag_size = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) sval = strchr(opt_string + strlen("integrity:"), ':') + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) if (!strcasecmp(sval, "aead")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) set_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) } else if (strcasecmp(sval, "none")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) ti->error = "Unknown integrity profile";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) cc->cipher_auth = kstrdup(sval, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) if (!cc->cipher_auth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) } else if (sscanf(opt_string, "sector_size:%hu%c", &cc->sector_size, &dummy) == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) if (cc->sector_size < (1 << SECTOR_SHIFT) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) cc->sector_size > 4096 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) (cc->sector_size & (cc->sector_size - 1))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) ti->error = "Invalid feature value for sector_size";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) if (ti->len & ((cc->sector_size >> SECTOR_SHIFT) - 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) ti->error = "Device size is not multiple of sector_size feature";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) } else if (!strcasecmp(opt_string, "iv_large_sectors"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) ti->error = "Invalid feature arguments";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) #ifdef CONFIG_BLK_DEV_ZONED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) static int crypt_report_zones(struct dm_target *ti,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) struct dm_report_zones_args *args, unsigned int nr_zones)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) struct crypt_config *cc = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) sector_t sector = cc->start + dm_target_offset(ti, args->next_sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) args->start = cc->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) return blkdev_report_zones(cc->dev->bdev, sector, nr_zones,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) dm_report_zones_cb, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) * Construct an encryption mapping:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) * <cipher> [<key>|:<key_size>:<user|logon>:<key_description>] <iv_offset> <dev_path> <start>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) */
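/*
 * Illustrative table line (device path and key are examples only):
 *   aes-xts-plain64 <hex key> 0 /dev/sdb 0 1 allow_discards
 */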
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) struct crypt_config *cc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) const char *devname = dm_table_device_name(ti->table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) int key_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) unsigned int align_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) unsigned long long tmpll;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) size_t iv_size_padding, additional_req_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) char dummy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) if (argc < 5) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) ti->error = "Not enough arguments";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) key_size = get_key_size(&argv[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) if (key_size < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) ti->error = "Cannot parse key size";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) cc = kzalloc(struct_size(cc, key, key_size), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) if (!cc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) ti->error = "Cannot allocate encryption context";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) cc->key_size = key_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) cc->sector_size = (1 << SECTOR_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) cc->sector_shift = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) ti->private = cc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) spin_lock(&dm_crypt_clients_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) dm_crypt_clients_n++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) crypt_calculate_pages_per_client();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) spin_unlock(&dm_crypt_clients_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) ret = percpu_counter_init(&cc->n_allocated_pages, 0, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) /* Optional parameters need to be read before the cipher constructor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) if (argc > 5) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) ret = crypt_ctr_optional(ti, argc - 5, &argv[5]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) if (crypt_integrity_aead(cc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) cc->dmreq_start = sizeof(struct aead_request);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) cc->dmreq_start += crypto_aead_reqsize(any_tfm_aead(cc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) align_mask = crypto_aead_alignmask(any_tfm_aead(cc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) cc->dmreq_start = sizeof(struct skcipher_request);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) cc->dmreq_start += crypto_skcipher_reqsize(any_tfm(cc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) align_mask = crypto_skcipher_alignmask(any_tfm(cc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) if (align_mask < CRYPTO_MINALIGN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) /* Allocate the padding exactly */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) & align_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) * If the cipher requires greater alignment than kmalloc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) * alignment, we don't know the exact position of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) * initialization vector. We must assume worst case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) iv_size_padding = align_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) /* ...| IV + padding | original IV | original sec. number | bio tag offset | */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) additional_req_size = sizeof(struct dm_crypt_request) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) iv_size_padding + cc->iv_size +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) cc->iv_size +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) sizeof(uint64_t) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) sizeof(unsigned int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225)
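	/* Preallocate crypto requests (sized per the layout above) so I/O can make forward progress under memory pressure. */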
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) ret = mempool_init_kmalloc_pool(&cc->req_pool, MIN_IOS, cc->dmreq_start + additional_req_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) ti->error = "Cannot allocate crypt request mempool";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) cc->per_bio_data_size = ti->per_io_data_size =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + additional_req_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) ARCH_KMALLOC_MINALIGN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) ret = mempool_init(&cc->page_pool, BIO_MAX_PAGES, crypt_page_alloc, crypt_page_free, cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) ti->error = "Cannot allocate page mempool";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) ret = bioset_init(&cc->bs, MIN_IOS, 0, BIOSET_NEED_BVECS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) ti->error = "Cannot allocate crypt bioset";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247)
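	/* Used by crypt_alloc_buffer() to serialize page allocation for outgoing write bios when it may block. */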
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) mutex_init(&cc->bio_alloc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249)
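	/* argv[2] is the iv_offset (in 512-byte sectors); it must be aligned to the encryption sector size. */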
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) if ((sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) (tmpll & ((cc->sector_size >> SECTOR_SHIFT) - 1))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) ti->error = "Invalid iv_offset sector";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) cc->iv_offset = tmpll;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) ti->error = "Device lookup failed";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263)
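	/* argv[4] is the start of the mapped area on the backing device, in 512-byte sectors. */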
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) ti->error = "Invalid device sector";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) cc->start = tmpll;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) * For zoned block devices, we need to preserve the issuer write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) * ordering. To do so, disable write workqueues and force inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) * encryption completion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) if (bdev_is_zoned(cc->dev->bdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) set_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) set_bit(DM_CRYPT_WRITE_INLINE, &cc->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) if (crypt_integrity_aead(cc) || cc->integrity_iv_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) ret = crypt_integrity_ctr(cc, ti);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) cc->tag_pool_max_sectors = POOL_ENTRY_SIZE / cc->on_disk_tag_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) if (!cc->tag_pool_max_sectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) cc->tag_pool_max_sectors = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) ret = mempool_init_kmalloc_pool(&cc->tag_pool, MIN_IOS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) cc->tag_pool_max_sectors * cc->on_disk_tag_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) ti->error = "Cannot allocate integrity tags mempool";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) cc->tag_pool_max_sectors <<= cc->sector_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299)
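	/*
	 * io_queue submits read bios to the backing device; crypt_queue runs
	 * the actual encryption/decryption, pinned to the submitting CPU when
	 * same_cpu_crypt is set.
	 */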
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) cc->io_queue = alloc_workqueue("kcryptd_io/%s", WQ_MEM_RECLAIM, 1, devname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) if (!cc->io_queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) ti->error = "Couldn't create kcryptd io queue";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) cc->crypt_queue = alloc_workqueue("kcryptd/%s", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) 1, devname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) cc->crypt_queue = alloc_workqueue("kcryptd/%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) num_online_cpus(), devname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) if (!cc->crypt_queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) ti->error = "Couldn't create kcryptd queue";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318)
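	/*
	 * Encrypted writes are collected in an rbtree sorted by sector and
	 * submitted by a dedicated thread to keep them reasonably ordered.
	 */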
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) spin_lock_init(&cc->write_thread_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) cc->write_tree = RB_ROOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write/%s", devname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) if (IS_ERR(cc->write_thread)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) ret = PTR_ERR(cc->write_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) cc->write_thread = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) ti->error = "Couldn't spawn write thread";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) wake_up_process(cc->write_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) ti->num_flush_bios = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) ti->limit_swap_bios = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) bad:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) crypt_dtr(ti);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) static int crypt_map(struct dm_target *ti, struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) struct dm_crypt_io *io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) struct crypt_config *cc = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) * If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) * - for REQ_PREFLUSH device-mapper core ensures that no IO is in-flight
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) * - for REQ_OP_DISCARD caller must use flush if IO ordering matters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) if (unlikely(bio->bi_opf & REQ_PREFLUSH ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) bio_op(bio) == REQ_OP_DISCARD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) bio_set_dev(bio, cc->dev->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) if (bio_sectors(bio))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) bio->bi_iter.bi_sector = cc->start +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) dm_target_offset(ti, bio->bi_iter.bi_sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) return DM_MAPIO_REMAPPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) 	 * Check if the bio is too large and split it if needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_PAGES << PAGE_SHIFT)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) (bio_data_dir(bio) == WRITE || cc->on_disk_tag_size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) dm_accept_partial_bio(bio, ((BIO_MAX_PAGES << PAGE_SHIFT) >> SECTOR_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) * Ensure that bio is a multiple of internal sector encryption size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) * and is aligned to this size as defined in IO hints.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) if (unlikely((bio->bi_iter.bi_sector & ((cc->sector_size >> SECTOR_SHIFT) - 1)) != 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) return DM_MAPIO_KILL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) if (unlikely(bio->bi_iter.bi_size & (cc->sector_size - 1)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) return DM_MAPIO_KILL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) io = dm_per_bio_data(bio, cc->per_bio_data_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379)
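	/*
	 * Authenticated modes need a per-bio buffer for the integrity tags.
	 * If it cannot be allocated (or would exceed KMALLOC_MAX_SIZE), split
	 * the bio and fall back to the preallocated tag pool.
	 */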
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) if (cc->on_disk_tag_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) unsigned tag_len = cc->on_disk_tag_size * (bio_sectors(bio) >> cc->sector_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) if (unlikely(tag_len > KMALLOC_MAX_SIZE) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) unlikely(!(io->integrity_metadata = kmalloc(tag_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) if (bio_sectors(bio) > cc->tag_pool_max_sectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) dm_accept_partial_bio(bio, cc->tag_pool_max_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) io->integrity_metadata = mempool_alloc(&cc->tag_pool, GFP_NOIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) io->integrity_metadata_from_pool = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392)
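	/* The crypto request lives in the per-bio data, immediately after struct dm_crypt_io. */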
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) if (crypt_integrity_aead(cc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) io->ctx.r.req_aead = (struct aead_request *)(io + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) io->ctx.r.req = (struct skcipher_request *)(io + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) if (bio_data_dir(io->base_bio) == READ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) if (kcryptd_io_read(io, GFP_NOWAIT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) kcryptd_queue_read(io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) kcryptd_queue_crypt(io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) return DM_MAPIO_SUBMITTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) static void crypt_status(struct dm_target *ti, status_type_t type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) unsigned status_flags, char *result, unsigned maxlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) struct crypt_config *cc = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) unsigned i, sz = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) int num_feature_args = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) case STATUSTYPE_INFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) result[0] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) case STATUSTYPE_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) DMEMIT("%s ", cc->cipher_string);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) if (cc->key_size > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) if (cc->key_string)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) DMEMIT(":%u:%s", cc->key_size, cc->key_string);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) for (i = 0; i < cc->key_size; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) DMEMIT("%02x", cc->key[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) DMEMIT("-");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) cc->dev->name, (unsigned long long)cc->start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433)
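		/* Optional feature args are printed as a count followed by the arguments, matching the constructor syntax. */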
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) num_feature_args += !!ti->num_discard_bios;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) num_feature_args += test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) num_feature_args += test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) num_feature_args += cc->sector_size != (1 << SECTOR_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) if (cc->on_disk_tag_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) num_feature_args++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) if (num_feature_args) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) DMEMIT(" %d", num_feature_args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) if (ti->num_discard_bios)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) DMEMIT(" allow_discards");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) DMEMIT(" same_cpu_crypt");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) DMEMIT(" submit_from_crypt_cpus");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) if (test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) DMEMIT(" no_read_workqueue");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) if (test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) DMEMIT(" no_write_workqueue");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) if (cc->on_disk_tag_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) DMEMIT(" integrity:%u:%s", cc->on_disk_tag_size, cc->cipher_auth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) if (cc->sector_size != (1 << SECTOR_SHIFT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) DMEMIT(" sector_size:%d", cc->sector_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) DMEMIT(" iv_large_sectors");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) static void crypt_postsuspend(struct dm_target *ti)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) struct crypt_config *cc = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) static int crypt_preresume(struct dm_target *ti)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) struct crypt_config *cc = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) DMERR("aborting resume - crypt key is not set.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) static void crypt_resume(struct dm_target *ti)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) struct crypt_config *cc = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) /* Message interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) * key set <key>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) * key wipe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) */
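/*
 * For example, from userspace (device name is illustrative):
 *	dmsetup suspend crypt0
 *	dmsetup message crypt0 0 key set <newkey>
 *	dmsetup resume crypt0
 */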
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) static int crypt_message(struct dm_target *ti, unsigned argc, char **argv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) char *result, unsigned maxlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) struct crypt_config *cc = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) int key_size, ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) if (argc < 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) if (!strcasecmp(argv[0], "key")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) DMWARN("not suspended during key manipulation.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) if (argc == 3 && !strcasecmp(argv[1], "set")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) /* The key size may not be changed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) key_size = get_key_size(&argv[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) if (key_size < 0 || cc->key_size != key_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) memset(argv[2], '0', strlen(argv[2]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) ret = crypt_set_key(cc, argv[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) if (cc->iv_gen_ops && cc->iv_gen_ops->init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) ret = cc->iv_gen_ops->init(cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) /* wipe the kernel key payload copy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) if (cc->key_string)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) memset(cc->key, 0, cc->key_size * sizeof(u8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) if (argc == 2 && !strcasecmp(argv[1], "wipe"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) return crypt_wipe_key(cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) DMWARN("unrecognised message received.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) static int crypt_iterate_devices(struct dm_target *ti,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) iterate_devices_callout_fn fn, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) struct crypt_config *cc = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) return fn(ti, cc->dev, cc->start, ti->len, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) struct crypt_config *cc = ti->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) 	 * This constraint is needed to avoid exceeding the underlying
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) 	 * device's max_segments limit: crypt_alloc_buffer() may allocate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) 	 * pages for the encryption bio that are not as physically
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) 	 * contiguous as the original bio.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) limits->max_segment_size = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557)
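	/* Expose the encryption sector size as the device's minimum I/O granularity. */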
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) limits->logical_block_size =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) max_t(unsigned, limits->logical_block_size, cc->sector_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) limits->physical_block_size =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) max_t(unsigned, limits->physical_block_size, cc->sector_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) static struct target_type crypt_target = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) .name = "crypt",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) .version = {1, 22, 0},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) .module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) .ctr = crypt_ctr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) .dtr = crypt_dtr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) #ifdef CONFIG_BLK_DEV_ZONED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) .features = DM_TARGET_ZONED_HM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) .report_zones = crypt_report_zones,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) .map = crypt_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) .status = crypt_status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) .postsuspend = crypt_postsuspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) .preresume = crypt_preresume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) .resume = crypt_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) .message = crypt_message,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) .iterate_devices = crypt_iterate_devices,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) .io_hints = crypt_io_hints,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) static int __init dm_crypt_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) r = dm_register_target(&crypt_target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) if (r < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) DMERR("register failed %d", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) static void __exit dm_crypt_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) dm_unregister_target(&crypt_target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) module_init(dm_crypt_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) module_exit(dm_crypt_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) MODULE_AUTHOR("Jana Saout <jana@saout.de>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) MODULE_LICENSE("GPL");