^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Freescale i.MX23/i.MX28 Data Co-Processor driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (C) 2013 Marek Vasut <marex@denx.de>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/kthread.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/of.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/stmp_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/clk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <crypto/aes.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <crypto/sha.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <crypto/internal/hash.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <crypto/internal/skcipher.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <crypto/scatterwalk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #define DCP_MAX_CHANS 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #define DCP_BUF_SZ PAGE_SIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #define DCP_SHA_PAY_SZ 64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #define DCP_ALIGNMENT 64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) /*
 * Null hashes to align with hardware behavior on imx6sl and imx6ull;
 * these are byte-reversed for consistency with the hardware output.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) static const uint8_t sha1_null_hash[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) "\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) "\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) static const uint8_t sha256_null_hash[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) "\x55\xb8\x52\x78\x1b\x99\x95\xa4"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) "\x4c\x93\x9b\x64\xe4\x41\xae\x27"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) "\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) "\x14\x1c\xfc\x98\x42\xc4\xb0\xe3";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) /* DCP DMA descriptor. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) struct dcp_dma_desc {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) uint32_t next_cmd_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) uint32_t control0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) uint32_t control1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) uint32_t source;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) uint32_t destination;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) uint32_t size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) uint32_t payload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) uint32_t status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) /* Coherent aligned block for bounce buffering. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) struct dcp_coherent_block {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) uint8_t aes_in_buf[DCP_BUF_SZ];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) uint8_t aes_out_buf[DCP_BUF_SZ];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) uint8_t sha_in_buf[DCP_BUF_SZ];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) uint8_t sha_out_buf[DCP_SHA_PAY_SZ];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) uint8_t aes_key[2 * AES_KEYSIZE_128];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) struct dcp_dma_desc desc[DCP_MAX_CHANS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68)
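/*
 * Per-device state: register base, capabilities, the coherent bounce
 * buffers, and a request queue, worker thread, lock and completion for
 * each of the DCP_MAX_CHANS hardware channels.
 */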
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) struct dcp {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) void __iomem *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) uint32_t caps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) struct dcp_coherent_block *coh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) struct completion completion[DCP_MAX_CHANS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) spinlock_t lock[DCP_MAX_CHANS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) struct task_struct *thread[DCP_MAX_CHANS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) struct crypto_queue queue[DCP_MAX_CHANS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) struct clk *dcp_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) enum dcp_chan {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) DCP_CHAN_HASH_SHA = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) DCP_CHAN_CRYPTO = 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) struct dcp_async_ctx {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) /* Common context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) enum dcp_chan chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) uint32_t fill;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) /* SHA Hash-specific context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) struct mutex mutex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) uint32_t alg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) unsigned int hot:1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) /* Crypto-specific context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) struct crypto_skcipher *fallback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) unsigned int key_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) uint8_t key[AES_KEYSIZE_128];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) struct dcp_aes_req_ctx {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) unsigned int enc:1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) unsigned int ecb:1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) struct skcipher_request fallback_req; // keep at the end
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) struct dcp_sha_req_ctx {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) unsigned int init:1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) unsigned int fini:1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) struct dcp_export_state {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) struct dcp_sha_req_ctx req_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) struct dcp_async_ctx async_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) /*
 * There can only ever be one instance of the MXS DCP, due to the
 * design of the Linux Crypto API.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) static struct dcp *global_sdcp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) /* DCP register layout. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) #define MXS_DCP_CTRL 0x00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) #define MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES (1 << 23)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) #define MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING (1 << 22)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) #define MXS_DCP_STAT 0x10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) #define MXS_DCP_STAT_CLR 0x18
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) #define MXS_DCP_STAT_IRQ_MASK 0xf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) #define MXS_DCP_CHANNELCTRL 0x20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) #define MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK 0xff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) #define MXS_DCP_CAPABILITY1 0x40
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) #define MXS_DCP_CAPABILITY1_SHA256 (4 << 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) #define MXS_DCP_CAPABILITY1_SHA1 (1 << 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) #define MXS_DCP_CAPABILITY1_AES128 (1 << 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) #define MXS_DCP_CONTEXT 0x50
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) #define MXS_DCP_CH_N_CMDPTR(n) (0x100 + ((n) * 0x40))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) #define MXS_DCP_CH_N_SEMA(n) (0x110 + ((n) * 0x40))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) #define MXS_DCP_CH_N_STAT(n) (0x120 + ((n) * 0x40))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) #define MXS_DCP_CH_N_STAT_CLR(n) (0x128 + ((n) * 0x40))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) /* DMA descriptor bits. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) #define MXS_DCP_CONTROL0_HASH_TERM (1 << 13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) #define MXS_DCP_CONTROL0_HASH_INIT (1 << 12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) #define MXS_DCP_CONTROL0_PAYLOAD_KEY (1 << 11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) #define MXS_DCP_CONTROL0_CIPHER_ENCRYPT (1 << 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) #define MXS_DCP_CONTROL0_CIPHER_INIT (1 << 9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) #define MXS_DCP_CONTROL0_ENABLE_HASH (1 << 6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) #define MXS_DCP_CONTROL0_ENABLE_CIPHER (1 << 5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) #define MXS_DCP_CONTROL0_DECR_SEMAPHORE (1 << 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) #define MXS_DCP_CONTROL0_INTERRUPT (1 << 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) #define MXS_DCP_CONTROL1_HASH_SELECT_SHA256 (2 << 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) #define MXS_DCP_CONTROL1_HASH_SELECT_SHA1 (0 << 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) #define MXS_DCP_CONTROL1_CIPHER_MODE_CBC (1 << 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) #define MXS_DCP_CONTROL1_CIPHER_MODE_ECB (0 << 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) #define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128 (0 << 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169)
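/*
 * Map the pre-filled per-channel descriptor, load it into the channel's
 * command pointer and kick the transfer by incrementing the channel
 * semaphore, then wait up to one second for the channel to complete.
 */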
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) int dma_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) struct dcp *sdcp = global_sdcp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) const int chan = actx->chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) uint32_t stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) unsigned long ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) dma_err = dma_mapping_error(sdcp->dev, desc_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) if (dma_err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) return dma_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) reinit_completion(&sdcp->completion[chan]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) /* Clear status register. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) /* Load the DMA descriptor. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) /* Increment the semaphore to start the DMA transfer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) ret = wait_for_completion_timeout(&sdcp->completion[chan],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) msecs_to_jiffies(1000));
	if (!ret) {
		dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n",
			chan, readl(sdcp->base + MXS_DCP_STAT));
		/* Don't leak the descriptor's streaming DMA mapping. */
		dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc),
				 DMA_TO_DEVICE);
		return -ETIMEDOUT;
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan));
	if (stat & 0xff) {
		dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n",
			chan, stat);
		dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc),
				 DMA_TO_DEVICE);
		return -EINVAL;
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) * Encryption (AES128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) */
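/*
 * Run one AES 128 pass over the data staged in the coherent bounce
 * buffers. The key (followed by the CBC IV, when used) is handed to the
 * engine through the descriptor payload pointer.
 */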
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) struct skcipher_request *req, int init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) dma_addr_t key_phys, src_phys, dst_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) struct dcp *sdcp = global_sdcp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) 2 * AES_KEYSIZE_128, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) ret = dma_mapping_error(sdcp->dev, key_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) DCP_BUF_SZ, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) ret = dma_mapping_error(sdcp->dev, src_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) goto err_src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) DCP_BUF_SZ, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) ret = dma_mapping_error(sdcp->dev, dst_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) goto err_dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) if (actx->fill % AES_BLOCK_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) dev_err(sdcp->dev, "Invalid block size!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) goto aes_done_run;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) /* Fill in the DMA descriptor. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) MXS_DCP_CONTROL0_INTERRUPT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) MXS_DCP_CONTROL0_ENABLE_CIPHER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) /* Payload contains the key. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) if (rctx->enc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) if (init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) if (rctx->ecb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) desc->next_cmd_addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) desc->source = src_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) desc->destination = dst_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) desc->size = actx->fill;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) desc->payload = key_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) desc->status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) ret = mxs_dcp_start_dma(actx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) aes_done_run:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) err_dst:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) err_src:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291)
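/*
 * Process one queued skcipher request: copy the source scatterlist
 * piecewise into the bounce buffer, run the engine on every full (or
 * final) chunk and scatter the output back into req->dst.
 */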
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) struct dcp *sdcp = global_sdcp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) struct skcipher_request *req = skcipher_request_cast(arq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) struct scatterlist *dst = req->dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) struct scatterlist *src = req->src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) int dst_nents = sg_nents(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) const int out_off = DCP_BUF_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) uint8_t *in_buf = sdcp->coh->aes_in_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) uint8_t *out_buf = sdcp->coh->aes_out_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) uint32_t dst_off = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) uint8_t *src_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) uint32_t last_out_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) uint8_t *key = sdcp->coh->aes_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) unsigned int i, len, clen, tlen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) int init = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) bool limit_hit = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) actx->fill = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) /* Copy the key from the temporary location. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) memcpy(key, actx->key, actx->key_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) if (!rctx->ecb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) /* Copy the CBC IV just past the key. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) memcpy(key + AES_KEYSIZE_128, req->iv, AES_KEYSIZE_128);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) /* CBC needs the INIT set. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) init = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) for_each_sg(req->src, src, sg_nents(req->src), i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) src_buf = sg_virt(src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) len = sg_dma_len(src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) tlen += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) limit_hit = tlen > req->cryptlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) if (limit_hit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) len = req->cryptlen - (tlen - len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) if (actx->fill + len > out_off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) clen = out_off - actx->fill;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) clen = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) memcpy(in_buf + actx->fill, src_buf, clen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) len -= clen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) src_buf += clen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) actx->fill += clen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) * If we filled the buffer or this is the last SG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) * submit the buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) if (actx->fill == out_off || sg_is_last(src) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) limit_hit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) ret = mxs_dcp_run_aes(actx, req, init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) init = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) sg_pcopy_from_buffer(dst, dst_nents, out_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) actx->fill, dst_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) dst_off += actx->fill;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) last_out_len = actx->fill;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) actx->fill = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) } while (len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) if (limit_hit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375)
	/* Copy the last ciphertext block out as the IV for CBC chaining. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) if (!rctx->ecb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) if (rctx->enc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) memcpy(req->iv, out_buf+(last_out_len-AES_BLOCK_SIZE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) AES_BLOCK_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) memcpy(req->iv, in_buf+(last_out_len-AES_BLOCK_SIZE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) AES_BLOCK_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388)
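/*
 * Worker thread for the crypto channel: sleeps until woken by
 * mxs_dcp_aes_enqueue(), then drains the channel queue and completes
 * each request with the result of mxs_dcp_aes_block_crypt().
 */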
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) static int dcp_chan_thread_aes(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) struct dcp *sdcp = global_sdcp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) const int chan = DCP_CHAN_CRYPTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) struct crypto_async_request *backlog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) struct crypto_async_request *arq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) while (!kthread_should_stop()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) set_current_state(TASK_INTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) spin_lock(&sdcp->lock[chan]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) backlog = crypto_get_backlog(&sdcp->queue[chan]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) arq = crypto_dequeue_request(&sdcp->queue[chan]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) spin_unlock(&sdcp->lock[chan]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) if (!backlog && !arq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) set_current_state(TASK_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) if (backlog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) backlog->complete(backlog, -EINPROGRESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) if (arq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) ret = mxs_dcp_aes_block_crypt(arq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) arq->complete(arq, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425)
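/*
 * Hand the request to the software fallback tfm, used for key sizes the
 * hardware cannot handle (anything other than AES 128).
 */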
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) static int mxs_dcp_block_fallback(struct skcipher_request *req, int enc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) struct dcp_async_ctx *ctx = crypto_skcipher_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) req->base.complete, req->base.data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) req->cryptlen, req->iv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) if (enc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) ret = crypto_skcipher_encrypt(&rctx->fallback_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) ret = crypto_skcipher_decrypt(&rctx->fallback_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446)
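/* Queue a request on the crypto channel and wake its worker thread. */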
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) static int mxs_dcp_aes_enqueue(struct skcipher_request *req, int enc, int ecb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) struct dcp *sdcp = global_sdcp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) struct crypto_async_request *arq = &req->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) if (unlikely(actx->key_len != AES_KEYSIZE_128))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) return mxs_dcp_block_fallback(req, enc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) rctx->enc = enc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) rctx->ecb = ecb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) actx->chan = DCP_CHAN_CRYPTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) spin_lock(&sdcp->lock[actx->chan]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) spin_unlock(&sdcp->lock[actx->chan]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) wake_up_process(sdcp->thread[actx->chan]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) static int mxs_dcp_aes_ecb_decrypt(struct skcipher_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) return mxs_dcp_aes_enqueue(req, 0, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) static int mxs_dcp_aes_ecb_encrypt(struct skcipher_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) return mxs_dcp_aes_enqueue(req, 1, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) static int mxs_dcp_aes_cbc_decrypt(struct skcipher_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) return mxs_dcp_aes_enqueue(req, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) static int mxs_dcp_aes_cbc_encrypt(struct skcipher_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) return mxs_dcp_aes_enqueue(req, 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) /*
	 * AES 128 is supported by the hardware; store the key into the
	 * temporary buffer and exit. We must use the temporary buffer here,
	 * since there can still be an operation in progress.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) actx->key_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) if (len == AES_KEYSIZE_128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) memcpy(actx->key, key, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) * If the requested AES key size is not supported by the hardware,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) * but is supported by in-kernel software implementation, we use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) * software fallback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) crypto_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) crypto_skcipher_set_flags(actx->fallback,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) return crypto_skcipher_setkey(actx->fallback, key, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) struct crypto_skcipher *blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) blk = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) if (IS_ERR(blk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) return PTR_ERR(blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) actx->fallback = blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) crypto_skcipher_reqsize(blk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) static void mxs_dcp_aes_fallback_exit_tfm(struct crypto_skcipher *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) crypto_free_skcipher(actx->fallback);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) * Hashing (SHA1/SHA256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) */
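/*
 * Hash the data currently staged in the SHA bounce buffer. On the final
 * chunk the digest is written back through the descriptor payload into
 * sha_out_buf.
 */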
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) static int mxs_dcp_run_sha(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) struct dcp *sdcp = global_sdcp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) dma_addr_t digest_phys = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) DCP_BUF_SZ, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) ret = dma_mapping_error(sdcp->dev, buf_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) /* Fill in the DMA descriptor. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) MXS_DCP_CONTROL0_INTERRUPT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) MXS_DCP_CONTROL0_ENABLE_HASH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) if (rctx->init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) desc->control0 |= MXS_DCP_CONTROL0_HASH_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) desc->control1 = actx->alg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) desc->next_cmd_addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) desc->source = buf_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) desc->destination = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) desc->size = actx->fill;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) desc->payload = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) desc->status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576)
	/*
	 * Align the driver with hardware behavior when generating null
	 * hashes: return the precomputed digest instead of running the
	 * engine on an empty buffer.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) if (rctx->init && rctx->fini && desc->size == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) const uint8_t *sha_buf =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) (actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) sha1_null_hash : sha256_null_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) goto done_run;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) /* Set HASH_TERM bit for last transfer block. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) if (rctx->fini) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) ret = dma_mapping_error(sdcp->dev, digest_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) goto done_run;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) desc->payload = digest_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) ret = mxs_dcp_start_dma(actx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) if (rctx->fini)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) done_run:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613)
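/*
 * Copy the request's scatterlist into the SHA bounce buffer, submitting a
 * hashing pass whenever the buffer fills up and a terminating pass once
 * the end of the stream is reached; the digest is then copied,
 * byte-reversed, into req->result.
 */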
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) struct dcp *sdcp = global_sdcp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) struct ahash_request *req = ahash_request_cast(arq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) uint8_t *in_buf = sdcp->coh->sha_in_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) uint8_t *out_buf = sdcp->coh->sha_out_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) struct scatterlist *src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) unsigned int i, len, clen, oft = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631)
	int fin = rctx->fini;

	if (fin)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) rctx->fini = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) src = req->src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) len = req->nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) while (len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) if (actx->fill + len > DCP_BUF_SZ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) clen = DCP_BUF_SZ - actx->fill;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) clen = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) scatterwalk_map_and_copy(in_buf + actx->fill, src, oft, clen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) len -= clen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) oft += clen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) actx->fill += clen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) * If we filled the buffer and still have some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) * more data, submit the buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) if (len && actx->fill == DCP_BUF_SZ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) ret = mxs_dcp_run_sha(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) actx->fill = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) rctx->init = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) if (fin) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) rctx->fini = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667)
		if (!req->result)
			return -EINVAL;

		/* Submit whatever is left. */
		ret = mxs_dcp_run_sha(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) actx->fill = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677)
		/* The hardware emits the digest byte-reversed, so flip it back. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) for (i = 0; i < halg->digestsize; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) req->result[i] = out_buf[halg->digestsize - i - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685)
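/*
 * Worker thread for the hashing channel, mirroring dcp_chan_thread_aes()
 * but feeding requests to dcp_sha_req_to_buf().
 */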
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) static int dcp_chan_thread_sha(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) struct dcp *sdcp = global_sdcp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) const int chan = DCP_CHAN_HASH_SHA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) struct crypto_async_request *backlog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) struct crypto_async_request *arq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) while (!kthread_should_stop()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) set_current_state(TASK_INTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) spin_lock(&sdcp->lock[chan]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) backlog = crypto_get_backlog(&sdcp->queue[chan]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) arq = crypto_dequeue_request(&sdcp->queue[chan]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) spin_unlock(&sdcp->lock[chan]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) if (!backlog && !arq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) set_current_state(TASK_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) if (backlog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) backlog->complete(backlog, -EINPROGRESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) if (arq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) ret = dcp_sha_req_to_buf(arq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) arq->complete(arq, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) static int dcp_sha_init(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) * Start hashing session. The code below only inits the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) * hashing session context, nothing more.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) memset(actx, 0, sizeof(*actx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) if (strcmp(halg->base.cra_name, "sha1") == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) actx->fill = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) actx->hot = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) actx->chan = DCP_CHAN_HASH_SHA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) mutex_init(&actx->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748)
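/*
 * Common helper for update/final/finup: record whether this is the last
 * chunk, queue the request on the hashing channel and wake its worker.
 */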
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) static int dcp_sha_update_fx(struct ahash_request *req, int fini)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) struct dcp *sdcp = global_sdcp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) * Ignore requests that have no data in them and are not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) * the trailing requests in the stream of requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) if (!req->nbytes && !fini)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) mutex_lock(&actx->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) rctx->fini = fini;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) if (!actx->hot) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) actx->hot = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) rctx->init = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) spin_lock(&sdcp->lock[actx->chan]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) spin_unlock(&sdcp->lock[actx->chan]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) wake_up_process(sdcp->thread[actx->chan]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) mutex_unlock(&actx->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) static int dcp_sha_update(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) return dcp_sha_update_fx(req, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) static int dcp_sha_final(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) ahash_request_set_crypt(req, NULL, req->result, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) req->nbytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) return dcp_sha_update_fx(req, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) static int dcp_sha_finup(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) return dcp_sha_update_fx(req, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) static int dcp_sha_digest(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) ret = dcp_sha_init(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) return dcp_sha_finup(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
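/*
 * Importing/exporting a partial hash state simply copies the request and
 * transform contexts in and out of struct dcp_export_state.
 */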
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) static int dcp_sha_import(struct ahash_request *req, const void *in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) const struct dcp_export_state *export = in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) memset(rctx, 0, sizeof(struct dcp_sha_req_ctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) memset(actx, 0, sizeof(struct dcp_async_ctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) memcpy(rctx, &export->req_ctx, sizeof(struct dcp_sha_req_ctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) memcpy(actx, &export->async_ctx, sizeof(struct dcp_async_ctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) static int dcp_sha_export(struct ahash_request *req, void *out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) struct dcp_sha_req_ctx *rctx_state = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) struct dcp_async_ctx *actx_state = crypto_ahash_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) struct dcp_export_state *export = out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) memcpy(&export->req_ctx, rctx_state, sizeof(struct dcp_sha_req_ctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) memcpy(&export->async_ctx, actx_state, sizeof(struct dcp_async_ctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) }
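
/*
 * Editorial sketch (not part of the driver): the .export/.import pair above
 * snapshots a partial hash into a caller-provided buffer of
 * crypto_ahash_statesize() bytes -- here sizeof(struct dcp_export_state) --
 * and restores it later. A minimal caller-side sequence, assuming "req" is
 * an already-prepared ahash_request for one of the DCP hashes and error
 * handling is trimmed:
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *	u8 state[sizeof(struct dcp_export_state)];
 *	int ret;
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ret = crypto_wait_req(crypto_ahash_init(req), &wait);
 *	ret = crypto_wait_req(crypto_ahash_update(req), &wait);
 *	ret = crypto_ahash_export(req, state);
 *	ret = crypto_ahash_import(req, state);
 *	ret = crypto_wait_req(crypto_ahash_final(req), &wait);
 */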
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) static int dcp_sha_cra_init(struct crypto_tfm *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) sizeof(struct dcp_sha_req_ctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) static void dcp_sha_cra_exit(struct crypto_tfm *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) /* AES-128 ECB and AES-128 CBC; other key sizes are handled by the fallback tfm. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) static struct skcipher_alg dcp_aes_algs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) .base.cra_name = "ecb(aes)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) .base.cra_driver_name = "ecb-aes-dcp",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) .base.cra_priority = 400,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) .base.cra_alignmask = 15,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) .base.cra_flags = CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) CRYPTO_ALG_NEED_FALLBACK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) .base.cra_blocksize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) .base.cra_ctxsize = sizeof(struct dcp_async_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) .base.cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) .min_keysize = AES_MIN_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) .max_keysize = AES_MAX_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) .setkey = mxs_dcp_aes_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) .encrypt = mxs_dcp_aes_ecb_encrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) .decrypt = mxs_dcp_aes_ecb_decrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) .init = mxs_dcp_aes_fallback_init_tfm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) .exit = mxs_dcp_aes_fallback_exit_tfm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) }, {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) .base.cra_name = "cbc(aes)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) .base.cra_driver_name = "cbc-aes-dcp",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) .base.cra_priority = 400,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) .base.cra_alignmask = 15,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) .base.cra_flags = CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) CRYPTO_ALG_NEED_FALLBACK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) .base.cra_blocksize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) .base.cra_ctxsize = sizeof(struct dcp_async_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) .base.cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) .min_keysize = AES_MIN_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) .max_keysize = AES_MAX_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) .setkey = mxs_dcp_aes_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) .encrypt = mxs_dcp_aes_cbc_encrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) .decrypt = mxs_dcp_aes_cbc_decrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) .ivsize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) .init = mxs_dcp_aes_fallback_init_tfm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) .exit = mxs_dcp_aes_fallback_exit_tfm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) };
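
/*
 * Editorial sketch (not part of the driver): both algorithms are registered
 * CRYPTO_ALG_ASYNC, so a caller normally pairs the request with
 * crypto_wait_req(). Minimal use of the CBC variant, assuming "key", "iv",
 * "sg" and "len" are prepared by the caller and error handling is trimmed:
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	struct skcipher_request *sreq = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	int ret;
 *
 *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	skcipher_request_set_callback(sreq, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				      crypto_req_done, &wait);
 *	skcipher_request_set_crypt(sreq, sg, sg, len, iv);
 *	ret = crypto_wait_req(crypto_skcipher_encrypt(sreq), &wait);
 *
 *	skcipher_request_free(sreq);
 *	crypto_free_skcipher(tfm);
 */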
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) /* SHA1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) static struct ahash_alg dcp_sha1_alg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) .init = dcp_sha_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) .update = dcp_sha_update,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) .final = dcp_sha_final,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) .finup = dcp_sha_finup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) .digest = dcp_sha_digest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) .import = dcp_sha_import,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) .export = dcp_sha_export,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) .halg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) .digestsize = SHA1_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) .statesize = sizeof(struct dcp_export_state),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) .cra_name = "sha1",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) .cra_driver_name = "sha1-dcp",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) .cra_priority = 400,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) .cra_alignmask = 63,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) .cra_flags = CRYPTO_ALG_ASYNC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) .cra_blocksize = SHA1_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) .cra_ctxsize = sizeof(struct dcp_async_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) .cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) .cra_init = dcp_sha_cra_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) .cra_exit = dcp_sha_cra_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) /* SHA256 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) static struct ahash_alg dcp_sha256_alg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) .init = dcp_sha_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) .update = dcp_sha_update,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) .final = dcp_sha_final,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) .finup = dcp_sha_finup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) .digest = dcp_sha_digest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) .import = dcp_sha_import,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) .export = dcp_sha_export,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) .halg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) .digestsize = SHA256_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) .statesize = sizeof(struct dcp_export_state),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) .cra_name = "sha256",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) .cra_driver_name = "sha256-dcp",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) .cra_priority = 400,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) .cra_alignmask = 63,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) .cra_flags = CRYPTO_ALG_ASYNC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) .cra_blocksize = SHA256_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) .cra_ctxsize = sizeof(struct dcp_async_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) .cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) .cra_init = dcp_sha_cra_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) .cra_exit = dcp_sha_cra_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
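/*
 * Shared handler for both DCP interrupt lines: acknowledge the pending
 * per-channel status bits and complete the matching channel completion so
 * whoever is waiting on that channel can proceed.
 */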
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) static irqreturn_t mxs_dcp_irq(int irq, void *context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) struct dcp *sdcp = context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) uint32_t stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) stat = readl(sdcp->base + MXS_DCP_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) stat &= MXS_DCP_STAT_IRQ_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) if (!stat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) /* Clear the interrupts. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) writel(stat, sdcp->base + MXS_DCP_STAT_CLR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) /* Complete the DMA requests that finished. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) for (i = 0; i < DCP_MAX_CHANS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) if (stat & (1 << i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) complete(&sdcp->completion[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
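/*
 * Probe: map the register window, hook both interrupt lines, allocate and
 * align the scratch block, enable the (optional) clock, reset and configure
 * the block, spawn the per-channel worker threads and finally register
 * whichever algorithms the capability register advertises.
 */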
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) static int mxs_dcp_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) struct dcp *sdcp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) int dcp_vmi_irq, dcp_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) if (global_sdcp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) dev_err(dev, "Only one DCP instance allowed!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) dcp_vmi_irq = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) if (dcp_vmi_irq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) return dcp_vmi_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) dcp_irq = platform_get_irq(pdev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) if (dcp_irq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) return dcp_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) if (!sdcp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) sdcp->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) sdcp->base = devm_platform_ioremap_resource(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) if (IS_ERR(sdcp->base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) return PTR_ERR(sdcp->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) ret = devm_request_irq(dev, dcp_vmi_irq, mxs_dcp_irq, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) "dcp-vmi-irq", sdcp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) dev_err(dev, "Failed to claim DCP VMI IRQ!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) ret = devm_request_irq(dev, dcp_irq, mxs_dcp_irq, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) "dcp-irq", sdcp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) dev_err(dev, "Failed to claim DCP IRQ!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) /* Allocate coherent helper block. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) if (!sdcp->coh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) /* Re-align the structure so it fits the DCP constraints. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) /* The DCP clock is optional; it is only present on some SoCs. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) sdcp->dcp_clk = devm_clk_get(dev, "dcp");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) if (IS_ERR(sdcp->dcp_clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) if (sdcp->dcp_clk != ERR_PTR(-ENOENT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) return PTR_ERR(sdcp->dcp_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) sdcp->dcp_clk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) ret = clk_prepare_enable(sdcp->dcp_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) /* Restart the DCP block. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) ret = stmp_reset_block(sdcp->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) dev_err(dev, "Failed to reset DCP block\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) goto err_disable_unprepare_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) /* Initialize control register. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING | 0xf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) sdcp->base + MXS_DCP_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) /* Enable all DCP DMA channels. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) writel(MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) sdcp->base + MXS_DCP_CHANNELCTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) * We do not enable context switching. Instead, point the context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) * buffer at an illegal address so that, if context switching is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) * inadvertently enabled, the DCP will return an error instead of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) * trashing good memory. The DCP DMA cannot access ROM, so any ROM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) * address will do.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) for (i = 0; i < DCP_MAX_CHANS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) global_sdcp = sdcp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) platform_set_drvdata(pdev, sdcp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) for (i = 0; i < DCP_MAX_CHANS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) spin_lock_init(&sdcp->lock[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) init_completion(&sdcp->completion[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) crypto_init_queue(&sdcp->queue[i], 50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) /* Create the SHA and AES handler threads. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) NULL, "mxs_dcp_chan/sha");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) dev_err(dev, "Error starting SHA thread!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) goto err_disable_unprepare_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) NULL, "mxs_dcp_chan/aes");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) dev_err(dev, "Error starting crypto thread!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) goto err_destroy_sha_thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) /* Register the various crypto algorithms. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) ret = crypto_register_skciphers(dcp_aes_algs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) ARRAY_SIZE(dcp_aes_algs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) /* Failed to register algorithm. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) dev_err(dev, "Failed to register AES crypto!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) goto err_destroy_aes_thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) ret = crypto_register_ahash(&dcp_sha1_alg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) dev_err(dev, "Failed to register %s hash!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) dcp_sha1_alg.halg.base.cra_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) goto err_unregister_aes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) ret = crypto_register_ahash(&dcp_sha256_alg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) dev_err(dev, "Failed to register %s hash!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) dcp_sha256_alg.halg.base.cra_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) goto err_unregister_sha1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) err_unregister_sha1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) crypto_unregister_ahash(&dcp_sha1_alg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) err_unregister_aes:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) err_destroy_aes_thread:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) err_destroy_sha_thread:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) err_disable_unprepare_clk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) clk_disable_unprepare(sdcp->dcp_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) static int mxs_dcp_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) struct dcp *sdcp = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) crypto_unregister_ahash(&dcp_sha256_alg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) crypto_unregister_ahash(&dcp_sha1_alg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) clk_disable_unprepare(sdcp->dcp_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) platform_set_drvdata(pdev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) global_sdcp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)
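/*
 * Both compatibles map to the same code paths (no per-SoC match data). The
 * device tree node is expected to provide the register window and at least
 * the two interrupts requested in probe: the VMI interrupt at index 0, then
 * the general DCP interrupt.
 */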
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) static const struct of_device_id mxs_dcp_dt_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) { .compatible = "fsl,imx23-dcp", .data = NULL, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) { .compatible = "fsl,imx28-dcp", .data = NULL, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) { /* sentinel */ }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) static struct platform_driver mxs_dcp_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) .probe = mxs_dcp_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) .remove = mxs_dcp_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) .name = "mxs-dcp",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) .of_match_table = mxs_dcp_dt_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) module_platform_driver(mxs_dcp_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) MODULE_DESCRIPTION("Freescale MXS DCP Driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) MODULE_ALIAS("platform:mxs-dcp");