// SPDX-License-Identifier: GPL-2.0

#include <linux/ceph/ceph_debug.h>

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/key-type.h>
#include <linux/sched/mm.h>

#include <keys/ceph-type.h>
#include <keys/user-type.h>
#include <linux/ceph/decode.h>
#include "crypto.h"

/*
 * Set ->key and ->tfm. The rest of the key should be filled in before
 * this function is called.
 */
static int set_secret(struct ceph_crypto_key *key, void *buf)
{
	unsigned int noio_flag;
	int ret;

	key->key = NULL;
	key->tfm = NULL;

	switch (key->type) {
	case CEPH_CRYPTO_NONE:
		return 0; /* nothing to do */
	case CEPH_CRYPTO_AES:
		break;
	default:
		return -ENOTSUPP;
	}

	if (!key->len)
		return -EINVAL;

	key->key = kmemdup(buf, key->len, GFP_NOIO);
	if (!key->key) {
		ret = -ENOMEM;
		goto fail;
	}

	/* crypto_alloc_sync_skcipher() allocates with GFP_KERNEL */
	noio_flag = memalloc_noio_save();
	key->tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
	memalloc_noio_restore(noio_flag);
	if (IS_ERR(key->tfm)) {
		ret = PTR_ERR(key->tfm);
		key->tfm = NULL;
		goto fail;
	}

	ret = crypto_sync_skcipher_setkey(key->tfm, key->key, key->len);
	if (ret)
		goto fail;

	return 0;

fail:
	ceph_crypto_key_destroy(key);
	return ret;
}

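/*
 * Duplicate @src into @dst, deep-copying the secret and allocating a fresh
 * cipher transform via set_secret().
 */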
int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
			  const struct ceph_crypto_key *src)
{
	memcpy(dst, src, sizeof(struct ceph_crypto_key));
	return set_secret(dst, src->key);
}

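/*
 * Encode a key into @p as: type (u16), creation time, secret length (u16),
 * followed by the secret bytes.
 */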
int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end)
{
	if (*p + sizeof(u16) + sizeof(key->created) +
	    sizeof(u16) + key->len > end)
		return -ERANGE;
	ceph_encode_16(p, key->type);
	ceph_encode_copy(p, &key->created, sizeof(key->created));
	ceph_encode_16(p, key->len);
	ceph_encode_copy(p, key->key, key->len);
	return 0;
}

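/*
 * Decode a key in the format produced by ceph_crypto_key_encode() and set
 * up its cipher transform.
 */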
int ceph_crypto_key_decode(struct ceph_crypto_key *key, void **p, void *end)
{
	int ret;

	ceph_decode_need(p, end, 2*sizeof(u16) + sizeof(key->created), bad);
	key->type = ceph_decode_16(p);
	ceph_decode_copy(p, &key->created, sizeof(key->created));
	key->len = ceph_decode_16(p);
	ceph_decode_need(p, end, key->len, bad);
	ret = set_secret(key, *p);
	*p += key->len;
	return ret;

bad:
	dout("failed to decode crypto key\n");
	return -EINVAL;
}

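/*
 * Decode a base64-armored key string into @key.
 */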
int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *inkey)
{
	int inlen = strlen(inkey);
	int blen = inlen * 3 / 4;
	void *buf, *p;
	int ret;

	dout("crypto_key_unarmor %s\n", inkey);
	buf = kmalloc(blen, GFP_NOFS);
	if (!buf)
		return -ENOMEM;
	blen = ceph_unarmor(buf, inkey, inkey+inlen);
	if (blen < 0) {
		kfree(buf);
		return blen;
	}

	p = buf;
	ret = ceph_crypto_key_decode(key, &p, p + blen);
	kfree(buf);
	if (ret)
		return ret;
	dout("crypto_key_unarmor key %p type %d len %d\n", key,
	     key->type, key->len);
	return 0;
}

void ceph_crypto_key_destroy(struct ceph_crypto_key *key)
{
	if (key) {
		kfree(key->key);
		key->key = NULL;
		if (key->tfm) {
			crypto_free_sync_skcipher(key->tfm);
			key->tfm = NULL;
		}
	}
}

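/*
 * Fixed IV defined by the Ceph protocol (CEPH_AES_IV); copied into a
 * per-request stack buffer below.
 */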
static const u8 *aes_iv = (u8 *)CEPH_AES_IV;

/*
 * Should be used for buffers allocated with ceph_kvmalloc().
 * Currently these are encrypt out-buffer (ceph_buffer) and decrypt
 * in-buffer (msg front).
 *
 * Dispose of @sgt with teardown_sgtable().
 *
 * @prealloc_sg is to avoid memory allocation inside sg_alloc_table()
 * in cases where a single sg is sufficient. No attempt to reduce the
 * number of sgs by squeezing physically contiguous pages together is
 * made though, for simplicity.
 */
static int setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg,
			 const void *buf, unsigned int buf_len)
{
	struct scatterlist *sg;
	const bool is_vmalloc = is_vmalloc_addr(buf);
	unsigned int off = offset_in_page(buf);
	unsigned int chunk_cnt = 1;
	unsigned int chunk_len = PAGE_ALIGN(off + buf_len);
	int i;
	int ret;

	if (buf_len == 0) {
		memset(sgt, 0, sizeof(*sgt));
		return -EINVAL;
	}

	if (is_vmalloc) {
		chunk_cnt = chunk_len >> PAGE_SHIFT;
		chunk_len = PAGE_SIZE;
	}

	if (chunk_cnt > 1) {
		ret = sg_alloc_table(sgt, chunk_cnt, GFP_NOFS);
		if (ret)
			return ret;
	} else {
		WARN_ON(chunk_cnt != 1);
		sg_init_table(prealloc_sg, 1);
		sgt->sgl = prealloc_sg;
		sgt->nents = sgt->orig_nents = 1;
	}

	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
		struct page *page;
		unsigned int len = min(chunk_len - off, buf_len);

		if (is_vmalloc)
			page = vmalloc_to_page(buf);
		else
			page = virt_to_page(buf);

		sg_set_page(sg, page, len, off);

		off = 0;
		buf += len;
		buf_len -= len;
	}
	WARN_ON(buf_len != 0);

	return 0;
}

static void teardown_sgtable(struct sg_table *sgt)
{
	if (sgt->orig_nents > 1)
		sg_free_table(sgt);
}

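/*
 * In-place AES-CBC over @buf. On encrypt, @in_len bytes are padded
 * PKCS#7-style up to a multiple of AES_BLOCK_SIZE (so @buf_len must allow
 * for up to AES_BLOCK_SIZE extra bytes); on decrypt, the trailing padding
 * byte is sanity-checked and the padding stripped.
 */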
static int ceph_aes_crypt(const struct ceph_crypto_key *key, bool encrypt,
			  void *buf, int buf_len, int in_len, int *pout_len)
{
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, key->tfm);
	struct sg_table sgt;
	struct scatterlist prealloc_sg;
	char iv[AES_BLOCK_SIZE] __aligned(8);
	int pad_byte = AES_BLOCK_SIZE - (in_len & (AES_BLOCK_SIZE - 1));
	int crypt_len = encrypt ? in_len + pad_byte : in_len;
	int ret;

	WARN_ON(crypt_len > buf_len);
	if (encrypt)
		memset(buf + in_len, pad_byte, pad_byte);
	ret = setup_sgtable(&sgt, &prealloc_sg, buf, crypt_len);
	if (ret)
		return ret;

	memcpy(iv, aes_iv, AES_BLOCK_SIZE);
	skcipher_request_set_sync_tfm(req, key->tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sgt.sgl, sgt.sgl, crypt_len, iv);

	/*
	print_hex_dump(KERN_ERR, "key: ", DUMP_PREFIX_NONE, 16, 1,
		       key->key, key->len, 1);
	print_hex_dump(KERN_ERR, " in: ", DUMP_PREFIX_NONE, 16, 1,
		       buf, crypt_len, 1);
	*/
	if (encrypt)
		ret = crypto_skcipher_encrypt(req);
	else
		ret = crypto_skcipher_decrypt(req);
	skcipher_request_zero(req);
	if (ret) {
		pr_err("%s %scrypt failed: %d\n", __func__,
		       encrypt ? "en" : "de", ret);
		goto out_sgt;
	}
	/*
	print_hex_dump(KERN_ERR, "out: ", DUMP_PREFIX_NONE, 16, 1,
		       buf, crypt_len, 1);
	*/

	if (encrypt) {
		*pout_len = crypt_len;
	} else {
		pad_byte = *(char *)(buf + in_len - 1);
		if (pad_byte > 0 && pad_byte <= AES_BLOCK_SIZE &&
		    in_len >= pad_byte) {
			*pout_len = in_len - pad_byte;
		} else {
			pr_err("%s got bad padding %d on in_len %d\n",
			       __func__, pad_byte, in_len);
			ret = -EPERM;
			goto out_sgt;
		}
	}

out_sgt:
	teardown_sgtable(&sgt);
	return ret;
}

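/*
 * Encrypt or decrypt @in_len bytes of @buf in place according to the key
 * type, returning the resulting length in @pout_len.
 */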
int ceph_crypt(const struct ceph_crypto_key *key, bool encrypt,
	       void *buf, int buf_len, int in_len, int *pout_len)
{
	switch (key->type) {
	case CEPH_CRYPTO_NONE:
		*pout_len = in_len;
		return 0;
	case CEPH_CRYPTO_AES:
		return ceph_aes_crypt(key, encrypt, buf, buf_len, in_len,
				      pout_len);
	default:
		return -ENOTSUPP;
	}
}

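/*
 * Parse a candidate payload for the "ceph" key type into a freshly
 * allocated struct ceph_crypto_key.
 */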
static int ceph_key_preparse(struct key_preparsed_payload *prep)
{
	struct ceph_crypto_key *ckey;
	size_t datalen = prep->datalen;
	int ret;
	void *p;

	ret = -EINVAL;
	if (datalen <= 0 || datalen > 32767 || !prep->data)
		goto err;

	ret = -ENOMEM;
	ckey = kmalloc(sizeof(*ckey), GFP_KERNEL);
	if (!ckey)
		goto err;

	/* TODO ceph_crypto_key_decode should really take const input */
	p = (void *)prep->data;
	ret = ceph_crypto_key_decode(ckey, &p, (char*)prep->data+datalen);
	if (ret < 0)
		goto err_ckey;

	prep->payload.data[0] = ckey;
	prep->quotalen = datalen;
	return 0;

err_ckey:
	kfree(ckey);
err:
	return ret;
}

static void ceph_key_free_preparse(struct key_preparsed_payload *prep)
{
	struct ceph_crypto_key *ckey = prep->payload.data[0];
	ceph_crypto_key_destroy(ckey);
	kfree(ckey);
}

static void ceph_key_destroy(struct key *key)
{
	struct ceph_crypto_key *ckey = key->payload.data[0];

	ceph_crypto_key_destroy(ckey);
	kfree(ckey);
}

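/*
 * Keyring key type for Ceph authentication secrets.
 */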
struct key_type key_type_ceph = {
	.name = "ceph",
	.preparse = ceph_key_preparse,
	.free_preparse = ceph_key_free_preparse,
	.instantiate = generic_key_instantiate,
	.destroy = ceph_key_destroy,
};

int __init ceph_crypto_init(void)
{
	return register_key_type(&key_type_ceph);
}

void ceph_crypto_shutdown(void)
{
	unregister_key_type(&key_type_ceph);
}