^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Driver for ARTPEC-6 crypto block using the kernel asynchronous crypto api.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (C) 2014-2017 Axis Communications AB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/bitfield.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/crypto.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/debugfs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/fault-inject.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/list.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/of.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/scatterlist.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <crypto/aes.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <crypto/gcm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <crypto/internal/aead.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <crypto/internal/hash.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <crypto/internal/skcipher.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <crypto/scatterwalk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <crypto/sha.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include <crypto/xts.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) /* Max length of a line in all cache levels for Artpec SoCs. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #define ARTPEC_CACHE_LINE_MAX 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #define PDMA_OUT_CFG 0x0000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #define PDMA_OUT_BUF_CFG 0x0004
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #define PDMA_OUT_CMD 0x0008
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #define PDMA_OUT_DESCRQ_PUSH 0x0010
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #define PDMA_OUT_DESCRQ_STAT 0x0014
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #define A6_PDMA_IN_CFG 0x0028
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #define A6_PDMA_IN_BUF_CFG 0x002c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #define A6_PDMA_IN_CMD 0x0030
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) #define A6_PDMA_IN_STATQ_PUSH 0x0038
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) #define A6_PDMA_IN_DESCRQ_PUSH 0x0044
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #define A6_PDMA_IN_DESCRQ_STAT 0x0048
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) #define A6_PDMA_INTR_MASK 0x0068
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #define A6_PDMA_ACK_INTR 0x006c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) #define A6_PDMA_MASKED_INTR 0x0074
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) #define A7_PDMA_IN_CFG 0x002c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) #define A7_PDMA_IN_BUF_CFG 0x0030
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) #define A7_PDMA_IN_CMD 0x0034
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) #define A7_PDMA_IN_STATQ_PUSH 0x003c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) #define A7_PDMA_IN_DESCRQ_PUSH 0x0048
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) #define A7_PDMA_IN_DESCRQ_STAT 0x004C
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) #define A7_PDMA_INTR_MASK 0x006c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) #define A7_PDMA_ACK_INTR 0x0070
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) #define A7_PDMA_MASKED_INTR 0x0078
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) #define PDMA_OUT_CFG_EN BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) #define PDMA_OUT_BUF_CFG_DATA_BUF_SIZE GENMASK(4, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) #define PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE GENMASK(9, 5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) #define PDMA_OUT_CMD_START BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) #define A6_PDMA_OUT_CMD_STOP BIT(3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) #define A7_PDMA_OUT_CMD_STOP BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) #define PDMA_OUT_DESCRQ_PUSH_LEN GENMASK(5, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) #define PDMA_OUT_DESCRQ_PUSH_ADDR GENMASK(31, 6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) #define PDMA_OUT_DESCRQ_STAT_LEVEL GENMASK(3, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) #define PDMA_OUT_DESCRQ_STAT_SIZE GENMASK(7, 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) #define PDMA_IN_CFG_EN BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) #define PDMA_IN_BUF_CFG_DATA_BUF_SIZE GENMASK(4, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) #define PDMA_IN_BUF_CFG_DESCR_BUF_SIZE GENMASK(9, 5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) #define PDMA_IN_BUF_CFG_STAT_BUF_SIZE GENMASK(14, 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) #define PDMA_IN_CMD_START BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) #define A6_PDMA_IN_CMD_FLUSH_STAT BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) #define A6_PDMA_IN_CMD_STOP BIT(3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) #define A7_PDMA_IN_CMD_FLUSH_STAT BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) #define A7_PDMA_IN_CMD_STOP BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) #define PDMA_IN_STATQ_PUSH_LEN GENMASK(5, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) #define PDMA_IN_STATQ_PUSH_ADDR GENMASK(31, 6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) #define PDMA_IN_DESCRQ_PUSH_LEN GENMASK(5, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) #define PDMA_IN_DESCRQ_PUSH_ADDR GENMASK(31, 6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) #define PDMA_IN_DESCRQ_STAT_LEVEL GENMASK(3, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) #define PDMA_IN_DESCRQ_STAT_SIZE GENMASK(7, 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) #define A6_PDMA_INTR_MASK_IN_DATA BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) #define A6_PDMA_INTR_MASK_IN_EOP BIT(3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) #define A6_PDMA_INTR_MASK_IN_EOP_FLUSH BIT(4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) #define A7_PDMA_INTR_MASK_IN_DATA BIT(3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) #define A7_PDMA_INTR_MASK_IN_EOP BIT(4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) #define A7_PDMA_INTR_MASK_IN_EOP_FLUSH BIT(5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) #define A6_CRY_MD_OPER GENMASK(19, 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) #define A6_CRY_MD_HASH_SEL_CTX GENMASK(21, 20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) #define A6_CRY_MD_HASH_HMAC_FIN BIT(23)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) #define A6_CRY_MD_CIPHER_LEN GENMASK(21, 20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) #define A6_CRY_MD_CIPHER_DECR BIT(22)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) #define A6_CRY_MD_CIPHER_TWEAK BIT(23)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) #define A6_CRY_MD_CIPHER_DSEQ BIT(24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) #define A7_CRY_MD_OPER GENMASK(11, 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) #define A7_CRY_MD_HASH_SEL_CTX GENMASK(13, 12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) #define A7_CRY_MD_HASH_HMAC_FIN BIT(15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) #define A7_CRY_MD_CIPHER_LEN GENMASK(13, 12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) #define A7_CRY_MD_CIPHER_DECR BIT(14)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) #define A7_CRY_MD_CIPHER_TWEAK BIT(15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) #define A7_CRY_MD_CIPHER_DSEQ BIT(16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) /* DMA metadata constants */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) #define regk_crypto_aes_cbc 0x00000002
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) #define regk_crypto_aes_ctr 0x00000003
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) #define regk_crypto_aes_ecb 0x00000001
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) #define regk_crypto_aes_gcm 0x00000004
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) #define regk_crypto_aes_xts 0x00000005
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) #define regk_crypto_cache 0x00000002
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) #define a6_regk_crypto_dlkey 0x0000000a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) #define a7_regk_crypto_dlkey 0x0000000e
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) #define regk_crypto_ext 0x00000001
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) #define regk_crypto_hmac_sha1 0x00000007
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) #define regk_crypto_hmac_sha256 0x00000009
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) #define regk_crypto_init 0x00000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) #define regk_crypto_key_128 0x00000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) #define regk_crypto_key_192 0x00000001
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) #define regk_crypto_key_256 0x00000002
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) #define regk_crypto_null 0x00000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) #define regk_crypto_sha1 0x00000006
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) #define regk_crypto_sha256 0x00000008
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) /* DMA descriptor structures */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) struct pdma_descr_ctrl {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) unsigned char short_descr : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) unsigned char pad1 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) unsigned char eop : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) unsigned char intr : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) unsigned char short_len : 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) unsigned char pad2 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) } __packed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) struct pdma_data_descr {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) unsigned int len : 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) unsigned int buf : 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) } __packed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) struct pdma_short_descr {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) unsigned char data[7];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) } __packed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) struct pdma_descr {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) struct pdma_descr_ctrl ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) union {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) struct pdma_data_descr data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) struct pdma_short_descr shrt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) struct pdma_stat_descr {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) unsigned char pad1 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) unsigned char pad2 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) unsigned char eop : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) unsigned char pad3 : 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) unsigned int len : 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) /* Each descriptor array can hold max 64 entries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) #define PDMA_DESCR_COUNT 64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) #define MODULE_NAME "Artpec-6 CA"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) /* Hash modes (including HMAC variants) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) #define ARTPEC6_CRYPTO_HASH_SHA1 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) #define ARTPEC6_CRYPTO_HASH_SHA256 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) /* Crypto modes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) #define ARTPEC6_CRYPTO_CIPHER_AES_ECB 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) #define ARTPEC6_CRYPTO_CIPHER_AES_CBC 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) #define ARTPEC6_CRYPTO_CIPHER_AES_CTR 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) #define ARTPEC6_CRYPTO_CIPHER_AES_XTS 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) /* The PDMA is a DMA-engine tightly coupled with a ciphering engine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) * It operates on a descriptor array with up to 64 descriptor entries.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) * The arrays must be 64 byte aligned in memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) * The ciphering unit has no registers and is completely controlled by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) * a 4-byte metadata that is inserted at the beginning of each dma packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) * A dma packet is a sequence of descriptors terminated by setting the .eop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) * field in the final descriptor of the packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) * Multiple packets are used for providing context data, key data and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) * the plain/ciphertext.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) * PDMA Descriptors (Array)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) * +------+------+------+~~+-------+------+----
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) * | 0 | 1 | 2 |~~| 11 EOP| 12 | ....
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) * +--+---+--+---+----+-+~~+-------+----+-+----
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) * | | | | |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) * | | | | |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) * __|__ +-------++-------++-------+ +----+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) * | MD | |Payload||Payload||Payload| | MD |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) * +-----+ +-------++-------++-------+ +----+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) struct artpec6_crypto_bounce_buffer {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) struct list_head list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) size_t length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) size_t offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) /* buf is aligned to ARTPEC_CACHE_LINE_MAX and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) * holds up to ARTPEC_CACHE_LINE_MAX bytes data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) void *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) struct artpec6_crypto_dma_map {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) dma_addr_t dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) size_t size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) enum dma_data_direction dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) struct artpec6_crypto_dma_descriptors {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) struct pdma_descr out[PDMA_DESCR_COUNT] __aligned(64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) struct pdma_descr in[PDMA_DESCR_COUNT] __aligned(64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) u32 stat[PDMA_DESCR_COUNT] __aligned(64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) struct list_head bounce_buffers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) /* Enough maps for all out/in buffers, and all three descr. arrays */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) struct artpec6_crypto_dma_map maps[PDMA_DESCR_COUNT * 2 + 2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) dma_addr_t out_dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) dma_addr_t in_dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) dma_addr_t stat_dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) size_t out_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) size_t in_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) size_t map_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) enum artpec6_crypto_variant {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) ARTPEC6_CRYPTO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) ARTPEC7_CRYPTO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) struct artpec6_crypto {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) void __iomem *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) spinlock_t queue_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) struct list_head queue; /* waiting for pdma fifo space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) struct list_head pending; /* submitted to pdma fifo */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) struct tasklet_struct task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) struct kmem_cache *dma_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) int pending_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) struct timer_list timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) enum artpec6_crypto_variant variant;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) void *pad_buffer; /* cache-aligned block padding buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) void *zero_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) enum artpec6_crypto_hash_flags {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) HASH_FLAG_INIT_CTX = 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) HASH_FLAG_UPDATE = 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) HASH_FLAG_FINALIZE = 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) HASH_FLAG_HMAC = 16,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) HASH_FLAG_UPDATE_KEY = 32,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) struct artpec6_crypto_req_common {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) struct list_head list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) struct list_head complete_in_progress;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) struct artpec6_crypto_dma_descriptors *dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) struct crypto_async_request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) void (*complete)(struct crypto_async_request *req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) gfp_t gfp_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) struct artpec6_hash_request_context {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) char partial_buffer[SHA256_BLOCK_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) char partial_buffer_out[SHA256_BLOCK_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) char key_buffer[SHA256_BLOCK_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) char pad_buffer[SHA256_BLOCK_SIZE + 32];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) unsigned char digeststate[SHA256_DIGEST_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) size_t partial_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) u64 digcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) u32 key_md;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) u32 hash_md;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) enum artpec6_crypto_hash_flags hash_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) struct artpec6_crypto_req_common common;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) struct artpec6_hash_export_state {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) char partial_buffer[SHA256_BLOCK_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) unsigned char digeststate[SHA256_DIGEST_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) size_t partial_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) u64 digcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) int oper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) unsigned int hash_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) struct artpec6_hashalg_context {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) char hmac_key[SHA256_BLOCK_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) size_t hmac_key_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) struct crypto_shash *child_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) struct artpec6_crypto_request_context {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) u32 cipher_md;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) bool decrypt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) struct artpec6_crypto_req_common common;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) struct artpec6_cryptotfm_context {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) unsigned char aes_key[2*AES_MAX_KEY_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) size_t key_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) u32 key_md;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) int crypto_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) struct crypto_sync_skcipher *fallback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) struct artpec6_crypto_aead_hw_ctx {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) __be64 aad_length_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) __be64 text_length_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) __u8 J0[AES_BLOCK_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) struct artpec6_crypto_aead_req_ctx {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) struct artpec6_crypto_aead_hw_ctx hw_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) u32 cipher_md;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) bool decrypt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) struct artpec6_crypto_req_common common;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) __u8 decryption_tag[AES_BLOCK_SIZE] ____cacheline_aligned;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) /* The crypto framework makes it hard to avoid this global. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) static struct device *artpec6_crypto_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) #ifdef CONFIG_FAULT_INJECTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) enum {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) ARTPEC6_CRYPTO_PREPARE_HASH_NO_START,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) ARTPEC6_CRYPTO_PREPARE_HASH_START,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) static int artpec6_crypto_prepare_aead(struct aead_request *areq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) static int artpec6_crypto_prepare_hash(struct ahash_request *areq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) artpec6_crypto_complete_crypto(struct crypto_async_request *req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) artpec6_crypto_complete_aead(struct crypto_async_request *req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) artpec6_crypto_complete_hash(struct crypto_async_request *req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) struct artpec6_crypto_walk {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) size_t offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) static void artpec6_crypto_walk_init(struct artpec6_crypto_walk *awalk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) struct scatterlist *sg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) awalk->sg = sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) awalk->offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) static size_t artpec6_crypto_walk_advance(struct artpec6_crypto_walk *awalk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) size_t nbytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) while (nbytes && awalk->sg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) size_t piece;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) WARN_ON(awalk->offset > awalk->sg->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) piece = min(nbytes, (size_t)awalk->sg->length - awalk->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) nbytes -= piece;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) awalk->offset += piece;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) if (awalk->offset == awalk->sg->length) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) awalk->sg = sg_next(awalk->sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) awalk->offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) return nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) static size_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) artpec6_crypto_walk_chunklen(const struct artpec6_crypto_walk *awalk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) WARN_ON(awalk->sg->length == awalk->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) return awalk->sg->length - awalk->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) static dma_addr_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) artpec6_crypto_walk_chunk_phys(const struct artpec6_crypto_walk *awalk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) return sg_phys(awalk->sg) + awalk->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) artpec6_crypto_copy_bounce_buffers(struct artpec6_crypto_req_common *common)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) struct artpec6_crypto_dma_descriptors *dma = common->dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) struct artpec6_crypto_bounce_buffer *b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) struct artpec6_crypto_bounce_buffer *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) pr_debug("bounce entry %p: %zu bytes @ %zu from %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) b, b->length, b->offset, b->buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) sg_pcopy_from_buffer(b->sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) b->buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) b->length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) b->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) list_del(&b->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) kfree(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) static inline bool artpec6_crypto_busy(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) int fifo_count = ac->pending_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) return fifo_count > 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) static int artpec6_crypto_submit(struct artpec6_crypto_req_common *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) int ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) spin_lock_bh(&ac->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) if (!artpec6_crypto_busy()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) list_add_tail(&req->list, &ac->pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) artpec6_crypto_start_dma(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) ret = -EINPROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) } else if (req->req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) list_add_tail(&req->list, &ac->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) artpec6_crypto_common_destroy(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) spin_unlock_bh(&ac->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477)
/* Push the request's in/out/stat descriptor queues to the PDMA block and
 * start both DMA channels for @common.
 * NOTE(review): the only visible caller (artpec6_crypto_submit) holds
 * ac->queue_lock around this call - confirm for any other call sites.
 */
static void artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;
	void __iomem *base = ac->base;
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	u32 ind, statd, outd;

	/* Make descriptor content visible to the DMA before starting it. */
	wmb();

	/* Queue-push registers encode element count minus one, plus the
	 * queue base address in units of 64 bytes (hence the >> 6).
	 */
	ind = FIELD_PREP(PDMA_IN_DESCRQ_PUSH_LEN, dma->in_cnt - 1) |
	      FIELD_PREP(PDMA_IN_DESCRQ_PUSH_ADDR, dma->in_dma_addr >> 6);

	statd = FIELD_PREP(PDMA_IN_STATQ_PUSH_LEN, dma->in_cnt - 1) |
		FIELD_PREP(PDMA_IN_STATQ_PUSH_ADDR, dma->stat_dma_addr >> 6);

	outd = FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_LEN, dma->out_cnt - 1) |
	       FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_ADDR, dma->out_dma_addr >> 6);

	/* The IN-channel registers sit at variant-specific offsets. */
	if (variant == ARTPEC6_CRYPTO) {
		writel_relaxed(ind, base + A6_PDMA_IN_DESCRQ_PUSH);
		writel_relaxed(statd, base + A6_PDMA_IN_STATQ_PUSH);
		writel_relaxed(PDMA_IN_CMD_START, base + A6_PDMA_IN_CMD);
	} else {
		writel_relaxed(ind, base + A7_PDMA_IN_DESCRQ_PUSH);
		writel_relaxed(statd, base + A7_PDMA_IN_STATQ_PUSH);
		writel_relaxed(PDMA_IN_CMD_START, base + A7_PDMA_IN_CMD);
	}

	/* The OUT-channel registers are shared between variants. */
	writel_relaxed(outd, base + PDMA_OUT_DESCRQ_PUSH);
	writel_relaxed(PDMA_OUT_CMD_START, base + PDMA_OUT_CMD);

	/* One more request in flight; checked by artpec6_crypto_busy(). */
	ac->pending_count++;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) artpec6_crypto_init_dma_operation(struct artpec6_crypto_req_common *common)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) struct artpec6_crypto_dma_descriptors *dma = common->dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) dma->out_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) dma->in_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) dma->map_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) INIT_LIST_HEAD(&dma->bounce_buffers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524)
/* Returns true when fault injection asks us to pretend the DMA descriptor
 * array is full, so the -ENOSPC error paths can be exercised.  Always false
 * when CONFIG_FAULT_INJECTION is disabled.
 */
static bool fault_inject_dma_descr(void)
{
#ifdef CONFIG_FAULT_INJECTION
	return should_fail(&artpec6_crypto_fail_dma_array_full, 1);
#else
	return false;
#endif
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) /** artpec6_crypto_setup_out_descr_phys - Setup an out channel with a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) * physical address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) * @addr: The physical address of the data buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) * @len: The length of the data buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) * @eop: True if this is the last buffer in the packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) * @return 0 on success or -ENOSPC if there are no more descriptors available
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) artpec6_crypto_setup_out_descr_phys(struct artpec6_crypto_req_common *common,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) dma_addr_t addr, size_t len, bool eop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) struct artpec6_crypto_dma_descriptors *dma = common->dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) struct pdma_descr *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) if (dma->out_cnt >= PDMA_DESCR_COUNT ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) fault_inject_dma_descr()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) pr_err("No free OUT DMA descriptors available!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) d = &dma->out[dma->out_cnt++];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) memset(d, 0, sizeof(*d));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) d->ctrl.short_descr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) d->ctrl.eop = eop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) d->data.len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) d->data.buf = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) /** artpec6_crypto_setup_out_descr_short - Setup a short out descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) * @dst: The virtual address of the data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) * @len: The length of the data, must be between 1 to 7 bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) * @eop: True if this is the last buffer in the packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) * @return 0 on success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) * -ENOSPC if no more descriptors are available
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) * -EINVAL if the data length exceeds 7 bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) artpec6_crypto_setup_out_descr_short(struct artpec6_crypto_req_common *common,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) void *dst, unsigned int len, bool eop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) struct artpec6_crypto_dma_descriptors *dma = common->dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) struct pdma_descr *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) if (dma->out_cnt >= PDMA_DESCR_COUNT ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) fault_inject_dma_descr()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) pr_err("No free OUT DMA descriptors available!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) } else if (len > 7 || len < 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) d = &dma->out[dma->out_cnt++];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) memset(d, 0, sizeof(*d));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) d->ctrl.short_descr = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) d->ctrl.short_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) d->ctrl.eop = eop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) memcpy(d->shrt.data, dst, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) static int artpec6_crypto_dma_map_page(struct artpec6_crypto_req_common *common,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) struct page *page, size_t offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) enum dma_data_direction dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) dma_addr_t *dma_addr_out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) struct artpec6_crypto_dma_descriptors *dma = common->dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) struct device *dev = artpec6_crypto_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) struct artpec6_crypto_dma_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) dma_addr_t dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) *dma_addr_out = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) if (dma->map_count >= ARRAY_SIZE(dma->maps))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) dma_addr = dma_map_page(dev, page, offset, size, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) if (dma_mapping_error(dev, dma_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) map = &dma->maps[dma->map_count++];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) map->size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) map->dma_addr = dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) map->dir = dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) *dma_addr_out = dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) artpec6_crypto_dma_map_single(struct artpec6_crypto_req_common *common,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) void *ptr, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) enum dma_data_direction dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) dma_addr_t *dma_addr_out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) struct page *page = virt_to_page(ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) size_t offset = (uintptr_t)ptr & ~PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) return artpec6_crypto_dma_map_page(common, page, offset, size, dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) dma_addr_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642)
/* Map the request's descriptor arrays (in, out, stat) for DMA.  Must run
 * after all descriptors have been built, since the sizes depend on the
 * final in_cnt/out_cnt.  On failure the partial mappings stay recorded and
 * are expected to be released via artpec6_crypto_dma_unmap_all().
 */
static int
artpec6_crypto_dma_map_descs(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	int ret;

	/* Device reads the in-descriptor array: map it to-device. */
	ret = artpec6_crypto_dma_map_single(common, dma->in,
				sizeof(dma->in[0]) * dma->in_cnt,
				DMA_TO_DEVICE, &dma->in_dma_addr);
	if (ret)
		return ret;

	/* Likewise for the out-descriptor array. */
	ret = artpec6_crypto_dma_map_single(common, dma->out,
				sizeof(dma->out[0]) * dma->out_cnt,
				DMA_TO_DEVICE, &dma->out_dma_addr);
	if (ret)
		return ret;

	/* We only read one stat descriptor */
	dma->stat[dma->in_cnt - 1] = 0;

	/*
	 * DMA_BIDIRECTIONAL since we need our zeroing of the stat descriptor
	 * to be written.
	 */
	return artpec6_crypto_dma_map_single(common,
				dma->stat,
				sizeof(dma->stat[0]) * dma->in_cnt,
				DMA_BIDIRECTIONAL,
				&dma->stat_dma_addr);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) artpec6_crypto_dma_unmap_all(struct artpec6_crypto_req_common *common)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) struct artpec6_crypto_dma_descriptors *dma = common->dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) struct device *dev = artpec6_crypto_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) for (i = 0; i < dma->map_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) struct artpec6_crypto_dma_map *map = &dma->maps[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) dma_unmap_page(dev, map->dma_addr, map->size, map->dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) dma->map_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) /** artpec6_crypto_setup_out_descr - Setup an out descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) * @dst: The virtual address of the data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) * @len: The length of the data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) * @eop: True if this is the last buffer in the packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) * @use_short: If this is true and the data length is 7 bytes or less then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) * a short descriptor will be used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) * @return 0 on success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) * Any errors from artpec6_crypto_setup_out_descr_short() or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) * setup_out_descr_phys()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) artpec6_crypto_setup_out_descr(struct artpec6_crypto_req_common *common,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) void *dst, unsigned int len, bool eop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) bool use_short)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) if (use_short && len < 7) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) return artpec6_crypto_setup_out_descr_short(common, dst, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) eop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) dma_addr_t dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) ret = artpec6_crypto_dma_map_single(common, dst, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) DMA_TO_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) &dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) return artpec6_crypto_setup_out_descr_phys(common, dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) len, eop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) /** artpec6_crypto_setup_in_descr_phys - Setup an in channel with a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) * physical address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) * @addr: The physical address of the data buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) * @len: The length of the data buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) * @intr: True if an interrupt should be fired after HW processing of this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) * descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) artpec6_crypto_setup_in_descr_phys(struct artpec6_crypto_req_common *common,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) dma_addr_t addr, unsigned int len, bool intr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) struct artpec6_crypto_dma_descriptors *dma = common->dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) struct pdma_descr *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) if (dma->in_cnt >= PDMA_DESCR_COUNT ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) fault_inject_dma_descr()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) pr_err("No free IN DMA descriptors available!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) d = &dma->in[dma->in_cnt++];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) memset(d, 0, sizeof(*d));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) d->ctrl.intr = intr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) d->data.len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) d->data.buf = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) /** artpec6_crypto_setup_in_descr - Setup an in channel descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) * @buffer: The virtual address to of the data buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) * @len: The length of the data buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) * @last: If this is the last data buffer in the request (i.e. an interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) * is needed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) * Short descriptors are not used for the in channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) artpec6_crypto_setup_in_descr(struct artpec6_crypto_req_common *common,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) void *buffer, unsigned int len, bool last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) dma_addr_t dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) ret = artpec6_crypto_dma_map_single(common, buffer, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) DMA_FROM_DEVICE, &dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) return artpec6_crypto_setup_in_descr_phys(common, dma_addr, len, last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) static struct artpec6_crypto_bounce_buffer *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) artpec6_crypto_alloc_bounce(gfp_t flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) void *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) size_t alloc_size = sizeof(struct artpec6_crypto_bounce_buffer) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) 2 * ARTPEC_CACHE_LINE_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) struct artpec6_crypto_bounce_buffer *bbuf = kzalloc(alloc_size, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) if (!bbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) base = bbuf + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) bbuf->buf = PTR_ALIGN(base, ARTPEC_CACHE_LINE_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) return bbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) static int setup_bounce_buffer_in(struct artpec6_crypto_req_common *common,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) struct artpec6_crypto_walk *walk, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) struct artpec6_crypto_bounce_buffer *bbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) bbuf = artpec6_crypto_alloc_bounce(common->gfp_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) if (!bbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) bbuf->length = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) bbuf->sg = walk->sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) bbuf->offset = walk->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) ret = artpec6_crypto_setup_in_descr(common, bbuf->buf, size, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) kfree(bbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) pr_debug("BOUNCE %zu offset %zu\n", size, walk->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) list_add_tail(&bbuf->list, &common->dma->bounce_buffers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) artpec6_crypto_setup_sg_descrs_in(struct artpec6_crypto_req_common *common,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) struct artpec6_crypto_walk *walk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) size_t chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) dma_addr_t addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) while (walk->sg && count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) chunk = min(count, artpec6_crypto_walk_chunklen(walk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) addr = artpec6_crypto_walk_chunk_phys(walk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) /* When destination buffers are not aligned to the cache line
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) * size we need bounce buffers. The DMA-API requires that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) * entire line is owned by the DMA buffer and this holds also
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) * for the case when coherent DMA is used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) if (!IS_ALIGNED(addr, ARTPEC_CACHE_LINE_MAX)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) chunk = min_t(dma_addr_t, chunk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) ALIGN(addr, ARTPEC_CACHE_LINE_MAX) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) ret = setup_bounce_buffer_in(common, walk, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) } else if (chunk < ARTPEC_CACHE_LINE_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) ret = setup_bounce_buffer_in(common, walk, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) dma_addr_t dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) chunk = chunk & ~(ARTPEC_CACHE_LINE_MAX-1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) pr_debug("CHUNK %pad:%zu\n", &addr, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) ret = artpec6_crypto_dma_map_page(common,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) sg_page(walk->sg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) walk->sg->offset +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) walk->offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) chunk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) DMA_FROM_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) &dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) ret = artpec6_crypto_setup_in_descr_phys(common,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) chunk, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) count = count - chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) artpec6_crypto_walk_advance(walk, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) if (count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) pr_err("EOL unexpected %zu bytes left\n", count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) return count ? -EINVAL : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) artpec6_crypto_setup_sg_descrs_out(struct artpec6_crypto_req_common *common,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) struct artpec6_crypto_walk *walk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) size_t chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) dma_addr_t addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) while (walk->sg && count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) chunk = min(count, artpec6_crypto_walk_chunklen(walk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) addr = artpec6_crypto_walk_chunk_phys(walk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) pr_debug("OUT-CHUNK %pad:%zu\n", &addr, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) if (addr & 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) char buf[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) chunk = min_t(size_t, chunk, (4-(addr&3)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) sg_pcopy_to_buffer(walk->sg, 1, buf, chunk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) walk->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) ret = artpec6_crypto_setup_out_descr_short(common, buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) chunk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) dma_addr_t dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) ret = artpec6_crypto_dma_map_page(common,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) sg_page(walk->sg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) walk->sg->offset +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) walk->offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) chunk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) DMA_TO_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) &dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) ret = artpec6_crypto_setup_out_descr_phys(common,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) chunk, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) count = count - chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) artpec6_crypto_walk_advance(walk, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) if (count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) pr_err("EOL unexpected %zu bytes left\n", count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) return count ? -EINVAL : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) /** artpec6_crypto_terminate_out_descrs - Set the EOP on the last out descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) * If the out descriptor list is non-empty, then the eop flag on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) * last used out descriptor will be set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) * @return 0 on success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) * -EINVAL if the out descriptor is empty or has overflown
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) artpec6_crypto_terminate_out_descrs(struct artpec6_crypto_req_common *common)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) struct artpec6_crypto_dma_descriptors *dma = common->dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) struct pdma_descr *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) if (!dma->out_cnt || dma->out_cnt > PDMA_DESCR_COUNT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) pr_err("%s: OUT descriptor list is %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) MODULE_NAME, dma->out_cnt ? "empty" : "full");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) d = &dma->out[dma->out_cnt-1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) d->ctrl.eop = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) /** artpec6_crypto_terminate_in_descrs - Set the interrupt flag on the last
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) * in descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) * See artpec6_crypto_terminate_out_descrs() for return values
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) artpec6_crypto_terminate_in_descrs(struct artpec6_crypto_req_common *common)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) struct artpec6_crypto_dma_descriptors *dma = common->dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) struct pdma_descr *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) if (!dma->in_cnt || dma->in_cnt > PDMA_DESCR_COUNT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) pr_err("%s: IN descriptor list is %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) MODULE_NAME, dma->in_cnt ? "empty" : "full");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) d = &dma->in[dma->in_cnt-1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) d->ctrl.intr = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) /** create_hash_pad - Create a Secure Hash conformant pad
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) * @dst: The destination buffer to write the pad. Must be at least 64 bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) * @dgstlen: The total length of the hash digest in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) * @bitcount: The total length of the digest in bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) * @return The total number of padding bytes written to @dst
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) static size_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) create_hash_pad(int oper, unsigned char *dst, u64 dgstlen, u64 bitcount)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) unsigned int mod, target, diff, pad_bytes, size_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) __be64 bits = __cpu_to_be64(bitcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) switch (oper) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) case regk_crypto_sha1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) case regk_crypto_sha256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) case regk_crypto_hmac_sha1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) case regk_crypto_hmac_sha256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) target = 448 / 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) mod = 512 / 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) size_bytes = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) target = 896 / 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) mod = 1024 / 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) size_bytes = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) target -= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) diff = dgstlen & (mod - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) pad_bytes = diff > target ? target + mod - diff : target - diff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) memset(dst + 1, 0, pad_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) dst[0] = 0x80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) if (size_bytes == 16) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) memset(dst + 1 + pad_bytes, 0, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) memcpy(dst + 1 + pad_bytes + 8, &bits, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) memcpy(dst + 1 + pad_bytes, &bits, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) return pad_bytes + size_bytes + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) static int artpec6_crypto_common_init(struct artpec6_crypto_req_common *common,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) struct crypto_async_request *parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) void (*complete)(struct crypto_async_request *req),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) struct scatterlist *dstsg, unsigned int nbytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) gfp_t flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) flags = (parent->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) GFP_KERNEL : GFP_ATOMIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) common->gfp_flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) common->dma = kmem_cache_alloc(ac->dma_cache, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) if (!common->dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) common->req = parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) common->complete = complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) artpec6_crypto_bounce_destroy(struct artpec6_crypto_dma_descriptors *dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) struct artpec6_crypto_bounce_buffer *b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) struct artpec6_crypto_bounce_buffer *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) kfree(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) artpec6_crypto_dma_unmap_all(common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) artpec6_crypto_bounce_destroy(common->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) kmem_cache_free(ac->dma_cache, common->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) common->dma = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) * Ciphering functions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) static int artpec6_crypto_encrypt(struct skcipher_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) struct artpec6_crypto_request_context *req_ctx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) void (*complete)(struct crypto_async_request *req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) req_ctx = skcipher_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) switch (ctx->crypto_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) req_ctx->decrypt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) switch (ctx->crypto_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) complete = artpec6_crypto_complete_cbc_encrypt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) complete = artpec6_crypto_complete_crypto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) ret = artpec6_crypto_common_init(&req_ctx->common,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) &req->base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) complete,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) req->dst, req->cryptlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) ret = artpec6_crypto_prepare_crypto(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) artpec6_crypto_common_destroy(&req_ctx->common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) return artpec6_crypto_submit(&req_ctx->common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) static int artpec6_crypto_decrypt(struct skcipher_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) struct artpec6_crypto_request_context *req_ctx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) void (*complete)(struct crypto_async_request *req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) req_ctx = skcipher_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) switch (ctx->crypto_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) req_ctx->decrypt = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) switch (ctx->crypto_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) complete = artpec6_crypto_complete_cbc_decrypt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) complete = artpec6_crypto_complete_crypto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) ret = artpec6_crypto_common_init(&req_ctx->common, &req->base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) complete,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) req->dst, req->cryptlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) ret = artpec6_crypto_prepare_crypto(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) artpec6_crypto_common_destroy(&req_ctx->common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) return artpec6_crypto_submit(&req_ctx->common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) artpec6_crypto_ctr_crypt(struct skcipher_request *req, bool encrypt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) size_t iv_len = crypto_skcipher_ivsize(cipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) unsigned int counter = be32_to_cpup((__be32 *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) (req->iv + iv_len - 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) unsigned int nblks = ALIGN(req->cryptlen, AES_BLOCK_SIZE) /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) AES_BLOCK_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) * The hardware uses only the last 32-bits as the counter while the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) * kernel tests (aes_ctr_enc_tv_template[4] for example) expect that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) * the whole IV is a counter. So fallback if the counter is going to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) * overlow.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) if (counter + nblks < counter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) pr_debug("counter %x will overflow (nblks %u), falling back\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) counter, counter + nblks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) ret = crypto_sync_skcipher_setkey(ctx->fallback, ctx->aes_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) ctx->key_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) skcipher_request_set_sync_tfm(subreq, ctx->fallback);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) skcipher_request_set_callback(subreq, req->base.flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) skcipher_request_set_crypt(subreq, req->src, req->dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) req->cryptlen, req->iv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) ret = encrypt ? crypto_skcipher_encrypt(subreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) : crypto_skcipher_decrypt(subreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) skcipher_request_zero(subreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) return encrypt ? artpec6_crypto_encrypt(req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) : artpec6_crypto_decrypt(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) static int artpec6_crypto_ctr_encrypt(struct skcipher_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) return artpec6_crypto_ctr_crypt(req, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) static int artpec6_crypto_ctr_decrypt(struct skcipher_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) return artpec6_crypto_ctr_crypt(req, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) * AEAD functions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) static int artpec6_crypto_aead_init(struct crypto_aead *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) struct artpec6_cryptotfm_context *tfm_ctx = crypto_aead_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) memset(tfm_ctx, 0, sizeof(*tfm_ctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) crypto_aead_set_reqsize(tfm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) sizeof(struct artpec6_crypto_aead_req_ctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) static int artpec6_crypto_aead_set_key(struct crypto_aead *tfm, const u8 *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(&tfm->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) if (len != 16 && len != 24 && len != 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) ctx->key_length = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) memcpy(ctx->aes_key, key, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) static int artpec6_crypto_aead_encrypt(struct aead_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) req_ctx->decrypt = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) ret = artpec6_crypto_common_init(&req_ctx->common, &req->base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) artpec6_crypto_complete_aead,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) ret = artpec6_crypto_prepare_aead(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) artpec6_crypto_common_destroy(&req_ctx->common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) return artpec6_crypto_submit(&req_ctx->common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) static int artpec6_crypto_aead_decrypt(struct aead_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) req_ctx->decrypt = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) if (req->cryptlen < AES_BLOCK_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) ret = artpec6_crypto_common_init(&req_ctx->common,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) &req->base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) artpec6_crypto_complete_aead,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) ret = artpec6_crypto_prepare_aead(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) artpec6_crypto_common_destroy(&req_ctx->common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) return artpec6_crypto_submit(&req_ctx->common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) static int artpec6_crypto_prepare_hash(struct ahash_request *areq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) struct artpec6_hashalg_context *ctx = crypto_tfm_ctx(areq->base.tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(areq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) size_t digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) size_t contextsize = digestsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) size_t blocksize = crypto_tfm_alg_blocksize(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) crypto_ahash_tfm(crypto_ahash_reqtfm(areq)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) struct artpec6_crypto_req_common *common = &req_ctx->common;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) enum artpec6_crypto_variant variant = ac->variant;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) u32 sel_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) bool ext_ctx = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) bool run_hw = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) artpec6_crypto_init_dma_operation(common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) /* Upload HMAC key, must be first the first packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) if (req_ctx->hash_flags & HASH_FLAG_HMAC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) if (variant == ARTPEC6_CRYPTO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) req_ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) a6_regk_crypto_dlkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) req_ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) a7_regk_crypto_dlkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) /* Copy and pad up the key */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) memcpy(req_ctx->key_buffer, ctx->hmac_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) ctx->hmac_key_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) memset(req_ctx->key_buffer + ctx->hmac_key_length, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) blocksize - ctx->hmac_key_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) error = artpec6_crypto_setup_out_descr(common,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) (void *)&req_ctx->key_md,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) sizeof(req_ctx->key_md), false, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) error = artpec6_crypto_setup_out_descr(common,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) req_ctx->key_buffer, blocksize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) true, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) if (!(req_ctx->hash_flags & HASH_FLAG_INIT_CTX)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) /* Restore context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) sel_ctx = regk_crypto_ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) ext_ctx = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) sel_ctx = regk_crypto_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) if (variant == ARTPEC6_CRYPTO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) req_ctx->hash_md &= ~A6_CRY_MD_HASH_SEL_CTX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) req_ctx->hash_md |= FIELD_PREP(A6_CRY_MD_HASH_SEL_CTX, sel_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) /* If this is the final round, set the final flag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) if (req_ctx->hash_flags & HASH_FLAG_FINALIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) req_ctx->hash_md |= A6_CRY_MD_HASH_HMAC_FIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) req_ctx->hash_md &= ~A7_CRY_MD_HASH_SEL_CTX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) req_ctx->hash_md |= FIELD_PREP(A7_CRY_MD_HASH_SEL_CTX, sel_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) /* If this is the final round, set the final flag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) if (req_ctx->hash_flags & HASH_FLAG_FINALIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) req_ctx->hash_md |= A7_CRY_MD_HASH_HMAC_FIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) /* Setup up metadata descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) error = artpec6_crypto_setup_out_descr(common,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) (void *)&req_ctx->hash_md,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) sizeof(req_ctx->hash_md), false, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) error = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) if (ext_ctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) error = artpec6_crypto_setup_out_descr(common,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) req_ctx->digeststate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) contextsize, false, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) if (req_ctx->hash_flags & HASH_FLAG_UPDATE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) size_t done_bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) size_t total_bytes = areq->nbytes + req_ctx->partial_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) size_t ready_bytes = round_down(total_bytes, blocksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) struct artpec6_crypto_walk walk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) run_hw = ready_bytes > 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) if (req_ctx->partial_bytes && ready_bytes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) /* We have a partial buffer and will at least some bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) * to the HW. Empty this partial buffer before tackling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) * the SG lists
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) memcpy(req_ctx->partial_buffer_out,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) req_ctx->partial_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) req_ctx->partial_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) error = artpec6_crypto_setup_out_descr(common,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) req_ctx->partial_buffer_out,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) req_ctx->partial_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) false, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) /* Reset partial buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) done_bytes += req_ctx->partial_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) req_ctx->partial_bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) artpec6_crypto_walk_init(&walk, areq->src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) error = artpec6_crypto_setup_sg_descrs_out(common, &walk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) ready_bytes -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) done_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) if (walk.sg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) size_t sg_skip = ready_bytes - done_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) size_t sg_rem = areq->nbytes - sg_skip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) req_ctx->partial_buffer +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) req_ctx->partial_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) sg_rem, sg_skip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) req_ctx->partial_bytes += sg_rem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) req_ctx->digcnt += ready_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) req_ctx->hash_flags &= ~(HASH_FLAG_UPDATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) /* Finalize */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) if (req_ctx->hash_flags & HASH_FLAG_FINALIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) size_t hash_pad_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) u64 digest_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) u32 oper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) if (variant == ARTPEC6_CRYPTO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) oper = FIELD_GET(A6_CRY_MD_OPER, req_ctx->hash_md);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) oper = FIELD_GET(A7_CRY_MD_OPER, req_ctx->hash_md);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) /* Write out the partial buffer if present */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) if (req_ctx->partial_bytes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) memcpy(req_ctx->partial_buffer_out,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) req_ctx->partial_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) req_ctx->partial_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) error = artpec6_crypto_setup_out_descr(common,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) req_ctx->partial_buffer_out,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) req_ctx->partial_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) false, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) req_ctx->digcnt += req_ctx->partial_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) req_ctx->partial_bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) if (req_ctx->hash_flags & HASH_FLAG_HMAC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) digest_bits = 8 * (req_ctx->digcnt + blocksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) digest_bits = 8 * req_ctx->digcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) /* Add the hash pad */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) hash_pad_len = create_hash_pad(oper, req_ctx->pad_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) req_ctx->digcnt, digest_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) error = artpec6_crypto_setup_out_descr(common,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) req_ctx->pad_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) hash_pad_len, false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) req_ctx->digcnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) /* Descriptor for the final result */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) error = artpec6_crypto_setup_in_descr(common, areq->result,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) digestsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) } else { /* This is not the final operation for this request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) if (!run_hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) return ARTPEC6_CRYPTO_PREPARE_HASH_NO_START;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) /* Save the result to the context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) error = artpec6_crypto_setup_in_descr(common,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) req_ctx->digeststate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) contextsize, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) /* fall through */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) req_ctx->hash_flags &= ~(HASH_FLAG_INIT_CTX | HASH_FLAG_UPDATE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) HASH_FLAG_FINALIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) error = artpec6_crypto_terminate_in_descrs(common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) error = artpec6_crypto_terminate_out_descrs(common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) error = artpec6_crypto_dma_map_descs(common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) return ARTPEC6_CRYPTO_PREPARE_HASH_START;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) static int artpec6_crypto_aes_ecb_init(struct crypto_skcipher *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_ECB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) static int artpec6_crypto_aes_ctr_init(struct crypto_skcipher *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) ctx->fallback =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) crypto_alloc_sync_skcipher(crypto_tfm_alg_name(&tfm->base),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 0, CRYPTO_ALG_NEED_FALLBACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) if (IS_ERR(ctx->fallback))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) return PTR_ERR(ctx->fallback);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) static int artpec6_crypto_aes_cbc_init(struct crypto_skcipher *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CBC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) static int artpec6_crypto_aes_xts_init(struct crypto_skcipher *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_XTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) static void artpec6_crypto_aes_exit(struct crypto_skcipher *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) memset(ctx, 0, sizeof(*ctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) static void artpec6_crypto_aes_ctr_exit(struct crypto_skcipher *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) crypto_free_sync_skcipher(ctx->fallback);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) artpec6_crypto_aes_exit(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) artpec6_crypto_cipher_set_key(struct crypto_skcipher *cipher, const u8 *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) unsigned int keylen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) struct artpec6_cryptotfm_context *ctx =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) crypto_skcipher_ctx(cipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) switch (keylen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) case 16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) case 24:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) case 32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) memcpy(ctx->aes_key, key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) ctx->key_length = keylen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) artpec6_crypto_xts_set_key(struct crypto_skcipher *cipher, const u8 *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) unsigned int keylen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) struct artpec6_cryptotfm_context *ctx =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) crypto_skcipher_ctx(cipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) ret = xts_check_key(&cipher->base, key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) switch (keylen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) case 32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) case 48:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) case 64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) memcpy(ctx->aes_key, key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) ctx->key_length = keylen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) /** artpec6_crypto_process_crypto - Prepare an async block cipher crypto request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) * @req: The asynch request to process
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) * @return 0 if the dma job was successfully prepared
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) * <0 on error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) * This function sets up the PDMA descriptors for a block cipher request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) * The required padding is added for AES-CTR using a statically defined
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) * buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) * The PDMA descriptor list will be as follows:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) * OUT: [KEY_MD][KEY][EOP]<CIPHER_MD>[IV]<data_0>...[data_n][AES-CTR_pad]<eop>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) * IN: <CIPHER_MD><data_0>...[data_n]<intr>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) struct artpec6_crypto_walk walk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) struct artpec6_crypto_request_context *req_ctx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) size_t iv_len = crypto_skcipher_ivsize(cipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) enum artpec6_crypto_variant variant = ac->variant;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) struct artpec6_crypto_req_common *common;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) bool cipher_decr = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) size_t cipher_klen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) u32 cipher_len = 0; /* Same as regk_crypto_key_128 for NULL crypto */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) u32 oper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) req_ctx = skcipher_request_ctx(areq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) common = &req_ctx->common;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) artpec6_crypto_init_dma_operation(common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) if (variant == ARTPEC6_CRYPTO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER, a6_regk_crypto_dlkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER, a7_regk_crypto_dlkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) sizeof(ctx->key_md), false, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) ctx->key_length, true, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) req_ctx->cipher_md = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) cipher_klen = ctx->key_length/2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) cipher_klen = ctx->key_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) /* Metadata */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) switch (cipher_klen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) case 16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) cipher_len = regk_crypto_key_128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) case 24:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) cipher_len = regk_crypto_key_192;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) case 32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) cipher_len = regk_crypto_key_256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) pr_err("%s: Invalid key length %d!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) MODULE_NAME, ctx->key_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) switch (ctx->crypto_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) oper = regk_crypto_aes_ecb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) cipher_decr = req_ctx->decrypt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) oper = regk_crypto_aes_cbc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) cipher_decr = req_ctx->decrypt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) case ARTPEC6_CRYPTO_CIPHER_AES_CTR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) oper = regk_crypto_aes_ctr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) cipher_decr = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) oper = regk_crypto_aes_xts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) cipher_decr = req_ctx->decrypt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) if (variant == ARTPEC6_CRYPTO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DSEQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DSEQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) pr_err("%s: Invalid cipher mode %d!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) MODULE_NAME, ctx->crypto_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) if (variant == ARTPEC6_CRYPTO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER, oper);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) cipher_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) if (cipher_decr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER, oper);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) cipher_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) if (cipher_decr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) ret = artpec6_crypto_setup_out_descr(common,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) &req_ctx->cipher_md,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) sizeof(req_ctx->cipher_md),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) false, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) if (iv_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) ret = artpec6_crypto_setup_out_descr(common, areq->iv, iv_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) false, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) /* Data out */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) artpec6_crypto_walk_init(&walk, areq->src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, areq->cryptlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) /* Data in */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) artpec6_crypto_walk_init(&walk, areq->dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, areq->cryptlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) /* CTR-mode padding required by the HW. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_CTR ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) size_t pad = ALIGN(areq->cryptlen, AES_BLOCK_SIZE) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) areq->cryptlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) if (pad) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) ret = artpec6_crypto_setup_out_descr(common,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) ac->pad_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) pad, false, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) ret = artpec6_crypto_setup_in_descr(common,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) ac->pad_buffer, pad,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) ret = artpec6_crypto_terminate_out_descrs(common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) ret = artpec6_crypto_terminate_in_descrs(common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) return artpec6_crypto_dma_map_descs(common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) static int artpec6_crypto_prepare_aead(struct aead_request *areq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) size_t count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) size_t input_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(areq->base.tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) struct crypto_aead *cipher = crypto_aead_reqtfm(areq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) struct artpec6_crypto_req_common *common = &req_ctx->common;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) enum artpec6_crypto_variant variant = ac->variant;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) u32 md_cipher_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) artpec6_crypto_init_dma_operation(common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) /* Key */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) if (variant == ARTPEC6_CRYPTO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) a6_regk_crypto_dlkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) a7_regk_crypto_dlkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) sizeof(ctx->key_md), false, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) ctx->key_length, true, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) req_ctx->cipher_md = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) switch (ctx->key_length) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) case 16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) md_cipher_len = regk_crypto_key_128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) case 24:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) md_cipher_len = regk_crypto_key_192;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) case 32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) md_cipher_len = regk_crypto_key_256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) if (variant == ARTPEC6_CRYPTO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) regk_crypto_aes_gcm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) md_cipher_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) if (req_ctx->decrypt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) regk_crypto_aes_gcm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) md_cipher_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) if (req_ctx->decrypt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) ret = artpec6_crypto_setup_out_descr(common,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) (void *) &req_ctx->cipher_md,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) sizeof(req_ctx->cipher_md), false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) /* For the decryption, cryptlen includes the tag. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) input_length = areq->cryptlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) if (req_ctx->decrypt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) input_length -= crypto_aead_authsize(cipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) /* Prepare the context buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) req_ctx->hw_ctx.aad_length_bits =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) __cpu_to_be64(8*areq->assoclen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) req_ctx->hw_ctx.text_length_bits =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) __cpu_to_be64(8*input_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) memcpy(req_ctx->hw_ctx.J0, areq->iv, crypto_aead_ivsize(cipher));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) // The HW omits the initial increment of the counter field.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) memcpy(req_ctx->hw_ctx.J0 + GCM_AES_IV_SIZE, "\x00\x00\x00\x01", 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) ret = artpec6_crypto_setup_out_descr(common, &req_ctx->hw_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) sizeof(struct artpec6_crypto_aead_hw_ctx), false, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) struct artpec6_crypto_walk walk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) artpec6_crypto_walk_init(&walk, areq->src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) /* Associated data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) count = areq->assoclen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) if (!IS_ALIGNED(areq->assoclen, 16)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) size_t assoc_pad = 16 - (areq->assoclen % 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) /* The HW mandates zero padding here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) ret = artpec6_crypto_setup_out_descr(common,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) ac->zero_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) assoc_pad, false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) /* Data to crypto */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) count = input_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) if (!IS_ALIGNED(input_length, 16)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) size_t crypto_pad = 16 - (input_length % 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) /* The HW mandates zero padding here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) ret = artpec6_crypto_setup_out_descr(common,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) ac->zero_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) crypto_pad,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) /* Data from crypto */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) struct artpec6_crypto_walk walk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) size_t output_len = areq->cryptlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) if (req_ctx->decrypt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) output_len -= crypto_aead_authsize(cipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) artpec6_crypto_walk_init(&walk, areq->dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) /* skip associated data in the output */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) count = artpec6_crypto_walk_advance(&walk, areq->assoclen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) if (count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) count = output_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) /* Put padding between the cryptotext and the auth tag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) if (!IS_ALIGNED(output_len, 16)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) size_t crypto_pad = 16 - (output_len % 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) ret = artpec6_crypto_setup_in_descr(common,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) ac->pad_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) crypto_pad, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) /* The authentication tag shall follow immediately after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) * the output ciphertext. For decryption it is put in a context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) * buffer for later compare against the input tag.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) if (req_ctx->decrypt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) ret = artpec6_crypto_setup_in_descr(common,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) req_ctx->decryption_tag, AES_BLOCK_SIZE, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) /* For encryption the requested tag size may be smaller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) * than the hardware's generated tag.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) size_t authsize = crypto_aead_authsize(cipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) ret = artpec6_crypto_setup_sg_descrs_in(common, &walk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) authsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) if (authsize < AES_BLOCK_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) count = AES_BLOCK_SIZE - authsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) ret = artpec6_crypto_setup_in_descr(common,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) ac->pad_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) count, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) ret = artpec6_crypto_terminate_in_descrs(common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) ret = artpec6_crypto_terminate_out_descrs(common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) return artpec6_crypto_dma_map_descs(common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) static void artpec6_crypto_process_queue(struct artpec6_crypto *ac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) struct list_head *completions)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) struct artpec6_crypto_req_common *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) while (!list_empty(&ac->queue) && !artpec6_crypto_busy()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) req = list_first_entry(&ac->queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) struct artpec6_crypto_req_common,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) list_move_tail(&req->list, &ac->pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) artpec6_crypto_start_dma(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) list_add_tail(&req->complete_in_progress, completions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) * In some cases, the hardware can raise an in_eop_flush interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) * before actually updating the status, so we have an timer which will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) * recheck the status on timeout. Since the cases are expected to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) * very rare, we use a relatively large timeout value. There should be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) * no noticeable negative effect if we timeout spuriously.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) if (ac->pending_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) mod_timer(&ac->timer, jiffies + msecs_to_jiffies(100));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) del_timer(&ac->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) static void artpec6_crypto_timeout(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) struct artpec6_crypto *ac = from_timer(ac, t, timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) dev_info_ratelimited(artpec6_crypto_dev, "timeout\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) tasklet_schedule(&ac->task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) static void artpec6_crypto_task(unsigned long data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) struct artpec6_crypto *ac = (struct artpec6_crypto *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) struct artpec6_crypto_req_common *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) struct artpec6_crypto_req_common *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) struct list_head complete_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) struct list_head complete_in_progress;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) INIT_LIST_HEAD(&complete_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) INIT_LIST_HEAD(&complete_in_progress);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) if (list_empty(&ac->pending)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) pr_debug("Spurious IRQ\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) spin_lock_bh(&ac->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) list_for_each_entry_safe(req, n, &ac->pending, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) struct artpec6_crypto_dma_descriptors *dma = req->dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) u32 stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) dma_addr_t stataddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) stataddr = dma->stat_dma_addr + 4 * (req->dma->in_cnt - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) dma_sync_single_for_cpu(artpec6_crypto_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) stataddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) stat = req->dma->stat[req->dma->in_cnt-1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) /* A non-zero final status descriptor indicates
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) * this job has finished.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) pr_debug("Request %p status is %X\n", req, stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) if (!stat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) /* Allow testing of timeout handling with fault injection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) #ifdef CONFIG_FAULT_INJECTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) if (should_fail(&artpec6_crypto_fail_status_read, 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) pr_debug("Completing request %p\n", req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) list_move_tail(&req->list, &complete_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) ac->pending_count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) artpec6_crypto_process_queue(ac, &complete_in_progress);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) spin_unlock_bh(&ac->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) /* Perform the completion callbacks without holding the queue lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) * to allow new request submissions from the callbacks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) list_for_each_entry_safe(req, n, &complete_done, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) artpec6_crypto_dma_unmap_all(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) artpec6_crypto_copy_bounce_buffers(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) artpec6_crypto_common_destroy(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) req->complete(req->req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) list_for_each_entry_safe(req, n, &complete_in_progress,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) complete_in_progress) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) req->req->complete(req->req, -EINPROGRESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) static void artpec6_crypto_complete_crypto(struct crypto_async_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) req->complete(req, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) struct skcipher_request *cipher_req = container_of(req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) struct skcipher_request, base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) scatterwalk_map_and_copy(cipher_req->iv, cipher_req->src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) cipher_req->cryptlen - AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) AES_BLOCK_SIZE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) req->complete(req, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) struct skcipher_request *cipher_req = container_of(req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) struct skcipher_request, base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) scatterwalk_map_and_copy(cipher_req->iv, cipher_req->dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) cipher_req->cryptlen - AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) AES_BLOCK_SIZE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) req->complete(req, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) static void artpec6_crypto_complete_aead(struct crypto_async_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) int result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) /* Verify GCM hashtag. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) struct aead_request *areq = container_of(req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) struct aead_request, base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) struct crypto_aead *aead = crypto_aead_reqtfm(areq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) if (req_ctx->decrypt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) u8 input_tag[AES_BLOCK_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) unsigned int authsize = crypto_aead_authsize(aead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) sg_pcopy_to_buffer(areq->src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) sg_nents(areq->src),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) input_tag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) authsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) areq->assoclen + areq->cryptlen -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) authsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) if (crypto_memneq(req_ctx->decryption_tag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) input_tag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) authsize)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) pr_debug("***EBADMSG:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) print_hex_dump_debug("ref:", DUMP_PREFIX_ADDRESS, 32, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) input_tag, authsize, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) print_hex_dump_debug("out:", DUMP_PREFIX_ADDRESS, 32, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) req_ctx->decryption_tag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) authsize, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) result = -EBADMSG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) req->complete(req, result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) static void artpec6_crypto_complete_hash(struct crypto_async_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) req->complete(req, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) /*------------------- Hash functions -----------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) artpec6_crypto_hash_set_key(struct crypto_ahash *tfm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) const u8 *key, unsigned int keylen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(&tfm->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) size_t blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) if (!keylen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) pr_err("Invalid length (%d) of HMAC key\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) if (keylen > blocksize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) tfm_ctx->hmac_key_length = blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) ret = crypto_shash_tfm_digest(tfm_ctx->child_hash, key, keylen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) tfm_ctx->hmac_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) memcpy(tfm_ctx->hmac_key, key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) tfm_ctx->hmac_key_length = keylen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) artpec6_crypto_init_hash(struct ahash_request *req, u8 type, int hmac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) enum artpec6_crypto_variant variant = ac->variant;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) u32 oper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) memset(req_ctx, 0, sizeof(*req_ctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) req_ctx->hash_flags = HASH_FLAG_INIT_CTX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) if (hmac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) req_ctx->hash_flags |= (HASH_FLAG_HMAC | HASH_FLAG_UPDATE_KEY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) case ARTPEC6_CRYPTO_HASH_SHA1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) oper = hmac ? regk_crypto_hmac_sha1 : regk_crypto_sha1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) case ARTPEC6_CRYPTO_HASH_SHA256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) oper = hmac ? regk_crypto_hmac_sha256 : regk_crypto_sha256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) pr_err("%s: Unsupported hash type 0x%x\n", MODULE_NAME, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) if (variant == ARTPEC6_CRYPTO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) req_ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, oper);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) req_ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, oper);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) static int artpec6_crypto_prepare_submit_hash(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) if (!req_ctx->common.dma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) ret = artpec6_crypto_common_init(&req_ctx->common,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) &req->base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) artpec6_crypto_complete_hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) ret = artpec6_crypto_prepare_hash(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) switch (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) case ARTPEC6_CRYPTO_PREPARE_HASH_START:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) ret = artpec6_crypto_submit(&req_ctx->common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) case ARTPEC6_CRYPTO_PREPARE_HASH_NO_START:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) artpec6_crypto_common_destroy(&req_ctx->common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) static int artpec6_crypto_hash_final(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) req_ctx->hash_flags |= HASH_FLAG_FINALIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) return artpec6_crypto_prepare_submit_hash(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) static int artpec6_crypto_hash_update(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) req_ctx->hash_flags |= HASH_FLAG_UPDATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) return artpec6_crypto_prepare_submit_hash(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) static int artpec6_crypto_sha1_init(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) static int artpec6_crypto_sha1_digest(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) return artpec6_crypto_prepare_submit_hash(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) static int artpec6_crypto_sha256_init(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) static int artpec6_crypto_sha256_digest(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) return artpec6_crypto_prepare_submit_hash(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) static int artpec6_crypto_hmac_sha256_init(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) static int artpec6_crypto_hmac_sha256_digest(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) return artpec6_crypto_prepare_submit_hash(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) static int artpec6_crypto_ahash_init_common(struct crypto_tfm *tfm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) const char *base_hash_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) sizeof(struct artpec6_hash_request_context));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) memset(tfm_ctx, 0, sizeof(*tfm_ctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) if (base_hash_name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) struct crypto_shash *child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) child = crypto_alloc_shash(base_hash_name, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) CRYPTO_ALG_NEED_FALLBACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) if (IS_ERR(child))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) return PTR_ERR(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) tfm_ctx->child_hash = child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) static int artpec6_crypto_ahash_init(struct crypto_tfm *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) return artpec6_crypto_ahash_init_common(tfm, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) static int artpec6_crypto_ahash_init_hmac_sha256(struct crypto_tfm *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) return artpec6_crypto_ahash_init_common(tfm, "sha256");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) static void artpec6_crypto_ahash_exit(struct crypto_tfm *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) if (tfm_ctx->child_hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) crypto_free_shash(tfm_ctx->child_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) tfm_ctx->hmac_key_length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) static int artpec6_crypto_hash_export(struct ahash_request *req, void *out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) const struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) struct artpec6_hash_export_state *state = out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) enum artpec6_crypto_variant variant = ac->variant;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) BUILD_BUG_ON(sizeof(state->partial_buffer) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) sizeof(ctx->partial_buffer));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) BUILD_BUG_ON(sizeof(state->digeststate) != sizeof(ctx->digeststate));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) state->digcnt = ctx->digcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) state->partial_bytes = ctx->partial_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) state->hash_flags = ctx->hash_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) if (variant == ARTPEC6_CRYPTO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) state->oper = FIELD_GET(A6_CRY_MD_OPER, ctx->hash_md);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) state->oper = FIELD_GET(A7_CRY_MD_OPER, ctx->hash_md);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) memcpy(state->partial_buffer, ctx->partial_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) sizeof(state->partial_buffer));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) memcpy(state->digeststate, ctx->digeststate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) sizeof(state->digeststate));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) static int artpec6_crypto_hash_import(struct ahash_request *req, const void *in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) const struct artpec6_hash_export_state *state = in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) enum artpec6_crypto_variant variant = ac->variant;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) memset(ctx, 0, sizeof(*ctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) ctx->digcnt = state->digcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) ctx->partial_bytes = state->partial_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) ctx->hash_flags = state->hash_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) if (variant == ARTPEC6_CRYPTO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, state->oper);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, state->oper);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) memcpy(ctx->partial_buffer, state->partial_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) sizeof(state->partial_buffer));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) memcpy(ctx->digeststate, state->digeststate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) sizeof(state->digeststate));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) static int init_crypto_hw(struct artpec6_crypto *ac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) enum artpec6_crypto_variant variant = ac->variant;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) void __iomem *base = ac->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) u32 out_descr_buf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) u32 out_data_buf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) u32 in_data_buf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) u32 in_descr_buf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) u32 in_stat_buf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) u32 in, out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) * The PDMA unit contains 1984 bytes of internal memory for the OUT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) * channels and 1024 bytes for the IN channel. This is an elastic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) * memory used to internally store the descriptors and data. The values
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) * ares specified in 64 byte incremements. Trustzone buffers are not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) * used at this stage.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) out_data_buf_size = 16; /* 1024 bytes for data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) out_descr_buf_size = 15; /* 960 bytes for descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) in_data_buf_size = 8; /* 512 bytes for data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) in_descr_buf_size = 4; /* 256 bytes for descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) in_stat_buf_size = 4; /* 256 bytes for stat descrs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) BUILD_BUG_ON_MSG((out_data_buf_size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) + out_descr_buf_size) * 64 > 1984,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) "Invalid OUT configuration");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) BUILD_BUG_ON_MSG((in_data_buf_size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) + in_descr_buf_size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) + in_stat_buf_size) * 64 > 1024,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) "Invalid IN configuration");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) in = FIELD_PREP(PDMA_IN_BUF_CFG_DATA_BUF_SIZE, in_data_buf_size) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) FIELD_PREP(PDMA_IN_BUF_CFG_DESCR_BUF_SIZE, in_descr_buf_size) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) FIELD_PREP(PDMA_IN_BUF_CFG_STAT_BUF_SIZE, in_stat_buf_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) out = FIELD_PREP(PDMA_OUT_BUF_CFG_DATA_BUF_SIZE, out_data_buf_size) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) FIELD_PREP(PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE, out_descr_buf_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) writel_relaxed(out, base + PDMA_OUT_BUF_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) writel_relaxed(PDMA_OUT_CFG_EN, base + PDMA_OUT_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) if (variant == ARTPEC6_CRYPTO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) writel_relaxed(in, base + A6_PDMA_IN_BUF_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) writel_relaxed(PDMA_IN_CFG_EN, base + A6_PDMA_IN_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) writel_relaxed(A6_PDMA_INTR_MASK_IN_DATA |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) A6_PDMA_INTR_MASK_IN_EOP_FLUSH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) base + A6_PDMA_INTR_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) writel_relaxed(in, base + A7_PDMA_IN_BUF_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) writel_relaxed(PDMA_IN_CFG_EN, base + A7_PDMA_IN_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) writel_relaxed(A7_PDMA_INTR_MASK_IN_DATA |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) A7_PDMA_INTR_MASK_IN_EOP_FLUSH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) base + A7_PDMA_INTR_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) static void artpec6_crypto_disable_hw(struct artpec6_crypto *ac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) enum artpec6_crypto_variant variant = ac->variant;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) void __iomem *base = ac->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) if (variant == ARTPEC6_CRYPTO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) writel_relaxed(A6_PDMA_IN_CMD_STOP, base + A6_PDMA_IN_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) writel_relaxed(0, base + A6_PDMA_IN_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) writel_relaxed(A6_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) writel_relaxed(A7_PDMA_IN_CMD_STOP, base + A7_PDMA_IN_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) writel_relaxed(0, base + A7_PDMA_IN_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) writel_relaxed(A7_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) writel_relaxed(0, base + PDMA_OUT_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) static irqreturn_t artpec6_crypto_irq(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) struct artpec6_crypto *ac = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) enum artpec6_crypto_variant variant = ac->variant;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) void __iomem *base = ac->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) u32 mask_in_data, mask_in_eop_flush;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) u32 in_cmd_flush_stat, in_cmd_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) u32 ack_intr_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) u32 ack = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) u32 intr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) if (variant == ARTPEC6_CRYPTO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) intr = readl_relaxed(base + A6_PDMA_MASKED_INTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) mask_in_data = A6_PDMA_INTR_MASK_IN_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) mask_in_eop_flush = A6_PDMA_INTR_MASK_IN_EOP_FLUSH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) in_cmd_flush_stat = A6_PDMA_IN_CMD_FLUSH_STAT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) in_cmd_reg = A6_PDMA_IN_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) ack_intr_reg = A6_PDMA_ACK_INTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) intr = readl_relaxed(base + A7_PDMA_MASKED_INTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) mask_in_data = A7_PDMA_INTR_MASK_IN_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) mask_in_eop_flush = A7_PDMA_INTR_MASK_IN_EOP_FLUSH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) in_cmd_flush_stat = A7_PDMA_IN_CMD_FLUSH_STAT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) in_cmd_reg = A7_PDMA_IN_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) ack_intr_reg = A7_PDMA_ACK_INTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) /* We get two interrupt notifications from each job.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) * The in_data means all data was sent to memory and then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) * we request a status flush command to write the per-job
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) * status to its status vector. This ensures that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) * tasklet can detect exactly how many submitted jobs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) * that have finished.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) if (intr & mask_in_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) ack |= mask_in_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) if (intr & mask_in_eop_flush)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) ack |= mask_in_eop_flush;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) writel_relaxed(in_cmd_flush_stat, base + in_cmd_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) writel_relaxed(ack, base + ack_intr_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) if (intr & mask_in_eop_flush)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) tasklet_schedule(&ac->task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) /*------------------- Algorithm definitions ----------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) /* Hashes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) static struct ahash_alg hash_algos[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) /* SHA-1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) .init = artpec6_crypto_sha1_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) .update = artpec6_crypto_hash_update,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) .final = artpec6_crypto_hash_final,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) .digest = artpec6_crypto_sha1_digest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) .import = artpec6_crypto_hash_import,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) .export = artpec6_crypto_hash_export,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) .halg.digestsize = SHA1_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) .halg.statesize = sizeof(struct artpec6_hash_export_state),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) .halg.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) .cra_name = "sha1",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) .cra_driver_name = "artpec-sha1",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) .cra_priority = 300,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) .cra_flags = CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) CRYPTO_ALG_ALLOCATES_MEMORY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) .cra_blocksize = SHA1_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) .cra_alignmask = 3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) .cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) .cra_init = artpec6_crypto_ahash_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) .cra_exit = artpec6_crypto_ahash_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) /* SHA-256 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) .init = artpec6_crypto_sha256_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) .update = artpec6_crypto_hash_update,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) .final = artpec6_crypto_hash_final,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) .digest = artpec6_crypto_sha256_digest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) .import = artpec6_crypto_hash_import,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) .export = artpec6_crypto_hash_export,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) .halg.digestsize = SHA256_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) .halg.statesize = sizeof(struct artpec6_hash_export_state),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) .halg.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) .cra_name = "sha256",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) .cra_driver_name = "artpec-sha256",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) .cra_priority = 300,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) .cra_flags = CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) CRYPTO_ALG_ALLOCATES_MEMORY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) .cra_blocksize = SHA256_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) .cra_alignmask = 3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) .cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) .cra_init = artpec6_crypto_ahash_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) .cra_exit = artpec6_crypto_ahash_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) /* HMAC SHA-256 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) .init = artpec6_crypto_hmac_sha256_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) .update = artpec6_crypto_hash_update,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) .final = artpec6_crypto_hash_final,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) .digest = artpec6_crypto_hmac_sha256_digest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) .import = artpec6_crypto_hash_import,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) .export = artpec6_crypto_hash_export,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) .setkey = artpec6_crypto_hash_set_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) .halg.digestsize = SHA256_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) .halg.statesize = sizeof(struct artpec6_hash_export_state),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) .halg.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) .cra_name = "hmac(sha256)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) .cra_driver_name = "artpec-hmac-sha256",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) .cra_priority = 300,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) .cra_flags = CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) CRYPTO_ALG_ALLOCATES_MEMORY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) .cra_blocksize = SHA256_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) .cra_alignmask = 3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) .cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) .cra_init = artpec6_crypto_ahash_init_hmac_sha256,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) .cra_exit = artpec6_crypto_ahash_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) /* Crypto */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) static struct skcipher_alg crypto_algos[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) /* AES - ECB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) .cra_name = "ecb(aes)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) .cra_driver_name = "artpec6-ecb-aes",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) .cra_priority = 300,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) .cra_flags = CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) CRYPTO_ALG_ALLOCATES_MEMORY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) .cra_blocksize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) .cra_alignmask = 3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) .cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) .min_keysize = AES_MIN_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) .max_keysize = AES_MAX_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) .setkey = artpec6_crypto_cipher_set_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) .encrypt = artpec6_crypto_encrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) .decrypt = artpec6_crypto_decrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) .init = artpec6_crypto_aes_ecb_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) .exit = artpec6_crypto_aes_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) /* AES - CTR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) .cra_name = "ctr(aes)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) .cra_driver_name = "artpec6-ctr-aes",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) .cra_priority = 300,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) .cra_flags = CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) CRYPTO_ALG_ALLOCATES_MEMORY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) CRYPTO_ALG_NEED_FALLBACK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) .cra_blocksize = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) .cra_alignmask = 3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) .cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) .min_keysize = AES_MIN_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) .max_keysize = AES_MAX_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) .ivsize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) .setkey = artpec6_crypto_cipher_set_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) .encrypt = artpec6_crypto_ctr_encrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) .decrypt = artpec6_crypto_ctr_decrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) .init = artpec6_crypto_aes_ctr_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) .exit = artpec6_crypto_aes_ctr_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) /* AES - CBC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) .cra_name = "cbc(aes)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) .cra_driver_name = "artpec6-cbc-aes",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) .cra_priority = 300,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) .cra_flags = CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) CRYPTO_ALG_ALLOCATES_MEMORY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) .cra_blocksize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) .cra_alignmask = 3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) .cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) .min_keysize = AES_MIN_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) .max_keysize = AES_MAX_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) .ivsize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) .setkey = artpec6_crypto_cipher_set_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) .encrypt = artpec6_crypto_encrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) .decrypt = artpec6_crypto_decrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) .init = artpec6_crypto_aes_cbc_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) .exit = artpec6_crypto_aes_exit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) /* AES - XTS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) .cra_name = "xts(aes)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) .cra_driver_name = "artpec6-xts-aes",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) .cra_priority = 300,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) .cra_flags = CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) CRYPTO_ALG_ALLOCATES_MEMORY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) .cra_blocksize = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) .cra_alignmask = 3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) .cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) .min_keysize = 2*AES_MIN_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) .max_keysize = 2*AES_MAX_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) .ivsize = 16,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) .setkey = artpec6_crypto_xts_set_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) .encrypt = artpec6_crypto_encrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) .decrypt = artpec6_crypto_decrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) .init = artpec6_crypto_aes_xts_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) .exit = artpec6_crypto_aes_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) static struct aead_alg aead_algos[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) .init = artpec6_crypto_aead_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) .setkey = artpec6_crypto_aead_set_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) .encrypt = artpec6_crypto_aead_encrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) .decrypt = artpec6_crypto_aead_decrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) .ivsize = GCM_AES_IV_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) .maxauthsize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) .cra_name = "gcm(aes)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) .cra_driver_name = "artpec-gcm-aes",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) .cra_priority = 300,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) .cra_flags = CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) CRYPTO_ALG_ALLOCATES_MEMORY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) CRYPTO_ALG_KERN_DRIVER_ONLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) .cra_blocksize = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) .cra_alignmask = 3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) .cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) #ifdef CONFIG_DEBUG_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) struct dbgfs_u32 {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) mode_t mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) u32 *flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) char *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) static struct dentry *dbgfs_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) static void artpec6_crypto_init_debugfs(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) #ifdef CONFIG_FAULT_INJECTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) fault_create_debugfs_attr("fail_status_read", dbgfs_root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) &artpec6_crypto_fail_status_read);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) fault_create_debugfs_attr("fail_dma_array_full", dbgfs_root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) &artpec6_crypto_fail_dma_array_full);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) static void artpec6_crypto_free_debugfs(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) debugfs_remove_recursive(dbgfs_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) dbgfs_root = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) static const struct of_device_id artpec6_crypto_of_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) { .compatible = "axis,artpec6-crypto", .data = (void *)ARTPEC6_CRYPTO },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) { .compatible = "axis,artpec7-crypto", .data = (void *)ARTPEC7_CRYPTO },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) MODULE_DEVICE_TABLE(of, artpec6_crypto_of_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) static int artpec6_crypto_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) const struct of_device_id *match;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) enum artpec6_crypto_variant variant;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) struct artpec6_crypto *ac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) void __iomem *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) int irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) if (artpec6_crypto_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) match = of_match_node(artpec6_crypto_of_match, dev->of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) if (!match)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) variant = (enum artpec6_crypto_variant)match->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) base = devm_platform_ioremap_resource(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) if (IS_ERR(base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) return PTR_ERR(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) irq = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) if (irq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) ac = devm_kzalloc(&pdev->dev, sizeof(struct artpec6_crypto),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) if (!ac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) platform_set_drvdata(pdev, ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) ac->variant = variant;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) spin_lock_init(&ac->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) INIT_LIST_HEAD(&ac->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) INIT_LIST_HEAD(&ac->pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) timer_setup(&ac->timer, artpec6_crypto_timeout, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) ac->base = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) ac->dma_cache = kmem_cache_create("artpec6_crypto_dma",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) sizeof(struct artpec6_crypto_dma_descriptors),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) 64,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) if (!ac->dma_cache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) #ifdef CONFIG_DEBUG_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) artpec6_crypto_init_debugfs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) tasklet_init(&ac->task, artpec6_crypto_task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) (unsigned long)ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) ac->pad_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) if (!ac->pad_buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) ac->pad_buffer = PTR_ALIGN(ac->pad_buffer, ARTPEC_CACHE_LINE_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) ac->zero_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) if (!ac->zero_buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) ac->zero_buffer = PTR_ALIGN(ac->zero_buffer, ARTPEC_CACHE_LINE_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) err = init_crypto_hw(ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) goto free_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) err = devm_request_irq(&pdev->dev, irq, artpec6_crypto_irq, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) "artpec6-crypto", ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) goto disable_hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) artpec6_crypto_dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) err = crypto_register_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) dev_err(dev, "Failed to register ahashes\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) goto disable_hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) err = crypto_register_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) dev_err(dev, "Failed to register ciphers\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) goto unregister_ahashes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) err = crypto_register_aeads(aead_algos, ARRAY_SIZE(aead_algos));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) dev_err(dev, "Failed to register aeads\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) goto unregister_algs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) unregister_algs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) unregister_ahashes:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) disable_hw:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) artpec6_crypto_disable_hw(ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) free_cache:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) kmem_cache_destroy(ac->dma_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) static int artpec6_crypto_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) struct artpec6_crypto *ac = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) int irq = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) crypto_unregister_aeads(aead_algos, ARRAY_SIZE(aead_algos));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) tasklet_disable(&ac->task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) devm_free_irq(&pdev->dev, irq, ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) tasklet_kill(&ac->task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) del_timer_sync(&ac->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) artpec6_crypto_disable_hw(ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) kmem_cache_destroy(ac->dma_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) #ifdef CONFIG_DEBUG_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) artpec6_crypto_free_debugfs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) static struct platform_driver artpec6_crypto_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) .probe = artpec6_crypto_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) .remove = artpec6_crypto_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) .name = "artpec6-crypto",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) .of_match_table = artpec6_crypto_of_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) module_platform_driver(artpec6_crypto_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) MODULE_AUTHOR("Axis Communications AB");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) MODULE_DESCRIPTION("ARTPEC-6 Crypto driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) MODULE_LICENSE("GPL");