^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright (c) 2010-2011 Picochip Ltd., Jamie Iles
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) #include <crypto/internal/aead.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include <crypto/aes.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <crypto/algapi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <crypto/authenc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <crypto/internal/des.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <crypto/md5.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <crypto/sha.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <crypto/internal/skcipher.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/clk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/crypto.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/dmapool.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/err.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/list.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/of.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <linux/pm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <linux/rtnetlink.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <linux/scatterlist.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <linux/sizes.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include <linux/timer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #include "picoxcell_crypto_regs.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) * The threshold for the number of entries in the CMD FIFO available before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) * the CMD0_CNT interrupt is raised. Increasing this value will reduce the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) * number of interrupts raised to the CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #define CMD0_IRQ_THRESHOLD 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) * The timeout period (in jiffies) for a PDU. When the number of PDUs in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) * flight is greater than the STAT_IRQ_THRESHOLD or is 0, the timer is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) * disabled. When there are packets in flight but fewer than the threshold,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) * we enable the timer and, on expiry, attempt to remove any processed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) * packets from the queue; if packets are still left, we schedule the timer again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #define PACKET_TIMEOUT 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) /* The priority to register each algorithm with. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) #define SPACC_CRYPTO_ALG_PRIORITY 10000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) #define SPACC_CRYPTO_KASUMI_F8_KEY_LEN 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) #define SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ 64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) #define SPACC_CRYPTO_IPSEC_HASH_PG_SZ 64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) #define SPACC_CRYPTO_IPSEC_MAX_CTXS 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) #define SPACC_CRYPTO_IPSEC_FIFO_SZ 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) #define SPACC_CRYPTO_L2_CIPHER_PG_SZ 64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) #define SPACC_CRYPTO_L2_HASH_PG_SZ 64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) #define SPACC_CRYPTO_L2_MAX_CTXS 128
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) #define SPACC_CRYPTO_L2_FIFO_SZ 128
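/*
 * Note: the SPACC_CRYPTO_IPSEC_* and SPACC_CRYPTO_L2_* values above appear to
 * describe the two engine variants this driver can drive (an IPsec offload
 * engine and a layer-2 engine), each with its own context page size, context
 * count and CMD FIFO depth. Which set is used is presumably selected at probe
 * time, outside this excerpt.
 */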
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) #define MAX_DDT_LEN 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) /* DDT format. This must match the hardware DDT format exactly. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) struct spacc_ddt {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) dma_addr_t p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) u32 len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) };
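/*
 * Each DDT is an array of { bus address, length } entries terminated by a
 * zero entry, e.g. for a two-segment payload:
 *
 *   { .p = seg0_dma, .len = seg0_len },
 *   { .p = seg1_dma, .len = seg1_len },
 *   { .p = 0, .len = 0 }   <-- terminator
 *
 * MAX_DDT_LEN therefore bounds the number of entries including the
 * terminator, which is why the DDT builders below check "nents + 1".
 */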
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) * Asynchronous crypto request structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) * This structure defines a request that is either queued for processing or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) * being processed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) struct spacc_req {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) struct list_head list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) struct spacc_engine *engine;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) struct crypto_async_request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) int result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) bool is_encrypt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) unsigned ctx_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) dma_addr_t src_addr, dst_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) struct spacc_ddt *src_ddt, *dst_ddt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) void (*complete)(struct spacc_req *req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) struct skcipher_request fallback_req; /* must be kept at the end */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) struct spacc_aead {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) unsigned long ctrl_default;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) unsigned long type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) struct aead_alg alg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) struct spacc_engine *engine;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) struct list_head entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) int key_offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) int iv_offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) struct spacc_engine {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) void __iomem *regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) struct list_head pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) int next_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) spinlock_t hw_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) int in_flight;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) struct list_head completed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) struct list_head in_progress;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) struct tasklet_struct complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) unsigned long fifo_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) void __iomem *cipher_ctx_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) void __iomem *hash_key_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) struct spacc_alg *algs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) unsigned num_algs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) struct list_head registered_algs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) struct spacc_aead *aeads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) unsigned num_aeads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) struct list_head registered_aeads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) size_t cipher_pg_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) size_t hash_pg_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) struct clk *clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) unsigned max_ctxs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) struct timer_list packet_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) unsigned stat_irq_thresh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) struct dma_pool *req_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) };
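/*
 * Request flow, as far as it is visible in this excerpt: new requests are
 * queued on 'pending' and moved to 'in_progress' by spacc_push() once there
 * is room in the command FIFO; the completion path (interrupt/timer handling
 * further down in the driver, not shown here) is expected to collect finished
 * requests on 'completed' for the 'complete' tasklet to run their callbacks.
 */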
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) /* Algorithm type mask. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) #define SPACC_CRYPTO_ALG_MASK 0x7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) /* SPACC definition of a crypto algorithm. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) struct spacc_alg {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) unsigned long ctrl_default;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) unsigned long type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) struct skcipher_alg alg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) struct spacc_engine *engine;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) struct list_head entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) int key_offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) int iv_offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) /* Generic context structure for any algorithm type. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) struct spacc_generic_ctx {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) struct spacc_engine *engine;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) int flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) int key_offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) int iv_offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) /* Block cipher context. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) struct spacc_ablk_ctx {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) struct spacc_generic_ctx generic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) u8 key[AES_MAX_KEY_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) u8 key_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) * The fallback cipher. If the operation can't be done in hardware,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) * fall back to a software version.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) struct crypto_skcipher *sw_cipher;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) /* AEAD cipher context. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) struct spacc_aead_ctx {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) struct spacc_generic_ctx generic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) u8 cipher_key[AES_MAX_KEY_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) u8 hash_ctx[SPACC_CRYPTO_IPSEC_HASH_PG_SZ];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) u8 cipher_key_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) u8 hash_key_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) struct crypto_aead *sw_cipher;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) static int spacc_ablk_submit(struct spacc_req *req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) static inline struct spacc_alg *to_spacc_skcipher(struct skcipher_alg *alg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) return alg ? container_of(alg, struct spacc_alg, alg) : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) static inline struct spacc_aead *to_spacc_aead(struct aead_alg *alg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) return container_of(alg, struct spacc_aead, alg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) static inline int spacc_fifo_cmd_full(struct spacc_engine *engine)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) u32 fifo_stat = readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) return fifo_stat & SPA_FIFO_CMD_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) * Given a cipher context, and a context number, get the base address of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) * context page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) * Returns the address of the context page where the key/context may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) * be written.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) static inline void __iomem *spacc_ctx_page_addr(struct spacc_generic_ctx *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) unsigned indx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) bool is_cipher_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) return is_cipher_ctx ? ctx->engine->cipher_ctx_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) (indx * ctx->engine->cipher_pg_sz) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) ctx->engine->hash_key_base + (indx * ctx->engine->hash_pg_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) }
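/*
 * Example of the arithmetic above: with the IPsec page sizes (64 bytes for
 * both cipher and hash pages), context index 3 resolves to
 * cipher_ctx_base + 3 * 64 = cipher_ctx_base + 192 for a cipher context, or
 * hash_key_base + 192 for a hash context.
 */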
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) /* The context pages can only be written with 32-bit accesses. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) static inline void memcpy_toio32(u32 __iomem *dst, const void *src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) unsigned count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) const u32 *src32 = (const u32 *) src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) while (count--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) writel(*src32++, dst++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) }
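/*
 * Note that 'count' is in 32-bit words, not bytes: callers pass a byte
 * length divided by four, so any trailing bytes of a length that is not a
 * multiple of four are silently dropped.
 */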
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) static void spacc_cipher_write_ctx(struct spacc_generic_ctx *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) void __iomem *page_addr, const u8 *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) size_t key_len, const u8 *iv, size_t iv_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) void __iomem *key_ptr = page_addr + ctx->key_offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) void __iomem *iv_ptr = page_addr + ctx->iv_offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) memcpy_toio32(key_ptr, key, key_len / 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) memcpy_toio32(iv_ptr, iv, iv_len / 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) * Load a context into the engine's context memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) * Returns the index of the context page where the context was loaded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) static unsigned spacc_load_ctx(struct spacc_generic_ctx *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) const u8 *ciph_key, size_t ciph_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) const u8 *iv, size_t ivlen, const u8 *hash_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) size_t hash_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) unsigned indx = ctx->engine->next_ctx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) void __iomem *ciph_page_addr, *hash_page_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) ciph_page_addr = spacc_ctx_page_addr(ctx, indx, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) hash_page_addr = spacc_ctx_page_addr(ctx, indx, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) ctx->engine->next_ctx &= ctx->engine->fifo_sz - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) spacc_cipher_write_ctx(ctx, ciph_page_addr, ciph_key, ciph_len, iv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) ivlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) writel(ciph_len | (indx << SPA_KEY_SZ_CTX_INDEX_OFFSET) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) (1 << SPA_KEY_SZ_CIPHER_OFFSET),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) ctx->engine->regs + SPA_KEY_SZ_REG_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) if (hash_key) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) memcpy_toio32(hash_page_addr, hash_key, hash_len / 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) writel(hash_len | (indx << SPA_KEY_SZ_CTX_INDEX_OFFSET),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) ctx->engine->regs + SPA_KEY_SZ_REG_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) return indx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) }
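/*
 * The "next_ctx &= fifo_sz - 1" wrap above only works if fifo_sz is a power
 * of two, which holds for the 32/128 FIFO depths defined earlier. Context
 * pages are therefore reused round-robin as commands are issued.
 */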
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) static inline void ddt_set(struct spacc_ddt *ddt, dma_addr_t phys, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) ddt->p = phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) ddt->len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) * Take a crypto request and scatterlists for the data and turn them into DDTs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) * for passing to the crypto engines. This also DMA maps the data so that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) * crypto engines can DMA to/from them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) static struct spacc_ddt *spacc_sg_to_ddt(struct spacc_engine *engine,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) struct scatterlist *payload,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) unsigned nbytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) enum dma_data_direction dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) dma_addr_t *ddt_phys)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) unsigned mapped_ents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) struct scatterlist *cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) struct spacc_ddt *ddt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) int nents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) nents = sg_nents_for_len(payload, nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) if (nents < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) dev_err(engine->dev, "Invalid number of SG.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) mapped_ents = dma_map_sg(engine->dev, payload, nents, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) if (mapped_ents + 1 > MAX_DDT_LEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, ddt_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) if (!ddt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) for_each_sg(payload, cur, mapped_ents, i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) ddt_set(&ddt[i], sg_dma_address(cur), sg_dma_len(cur));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) ddt_set(&ddt[mapped_ents], 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) return ddt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) dma_unmap_sg(engine->dev, payload, nents, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) }
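/*
 * Users of spacc_sg_to_ddt() are expected to release the mapping with
 * spacc_free_ddt() below, which redoes the sg_nents_for_len() walk, unmaps
 * the scatterlist and returns the DDT to the DMA pool.
 */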
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) static int spacc_aead_make_ddts(struct aead_request *areq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) struct crypto_aead *aead = crypto_aead_reqtfm(areq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) struct spacc_req *req = aead_request_ctx(areq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) struct spacc_engine *engine = req->engine;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) struct spacc_ddt *src_ddt, *dst_ddt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) unsigned total;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) int src_nents, dst_nents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) struct scatterlist *cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) int i, dst_ents, src_ents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) total = areq->assoclen + areq->cryptlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) if (req->is_encrypt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) total += crypto_aead_authsize(aead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) src_nents = sg_nents_for_len(areq->src, total);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) if (src_nents < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) dev_err(engine->dev, "Invalid number of src SG.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) return src_nents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) if (src_nents + 1 > MAX_DDT_LEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) return -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) dst_nents = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) if (areq->src != areq->dst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) dst_nents = sg_nents_for_len(areq->dst, total);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) if (dst_nents < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) dev_err(engine->dev, "Invalid number of dst SG.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) return dst_nents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) if (dst_nents + 1 > MAX_DDT_LEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) return -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) src_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->src_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) if (!src_ddt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) dst_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->dst_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) if (!dst_ddt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) goto err_free_src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) req->src_ddt = src_ddt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) req->dst_ddt = dst_ddt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) if (dst_nents) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) src_ents = dma_map_sg(engine->dev, areq->src, src_nents,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) if (!src_ents)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) goto err_free_dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) dst_ents = dma_map_sg(engine->dev, areq->dst, dst_nents,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) if (!dst_ents) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) dma_unmap_sg(engine->dev, areq->src, src_nents,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) goto err_free_dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) src_ents = dma_map_sg(engine->dev, areq->src, src_nents,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) if (!src_ents)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) goto err_free_dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) dst_ents = src_ents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) * Now map in the payload for the source and destination and terminate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) * with the NULL pointers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) for_each_sg(areq->src, cur, src_ents, i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) /* For decryption we need to skip the associated data. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) total = req->is_encrypt ? 0 : areq->assoclen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) for_each_sg(areq->dst, cur, dst_ents, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) unsigned len = sg_dma_len(cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) if (len <= total) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) total -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) ddt_set(dst_ddt++, sg_dma_address(cur) + total, len - total);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) ddt_set(src_ddt, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) ddt_set(dst_ddt, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) err_free_dst:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) dma_pool_free(engine->req_pool, dst_ddt, req->dst_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) err_free_src:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) dma_pool_free(engine->req_pool, src_ddt, req->src_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) static void spacc_aead_free_ddts(struct spacc_req *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) struct aead_request *areq = container_of(req->req, struct aead_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) struct crypto_aead *aead = crypto_aead_reqtfm(areq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) unsigned total = areq->assoclen + areq->cryptlen +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) (req->is_encrypt ? crypto_aead_authsize(aead) : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) struct spacc_aead_ctx *aead_ctx = crypto_aead_ctx(aead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) struct spacc_engine *engine = aead_ctx->generic.engine;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) int nents = sg_nents_for_len(areq->src, total);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) /* sg_nents_for_len should not fail since it succeeded when mapping the sg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) if (unlikely(nents < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) dev_err(engine->dev, "Invalid number of src SG.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) if (areq->src != areq->dst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) dma_unmap_sg(engine->dev, areq->src, nents, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) nents = sg_nents_for_len(areq->dst, total);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) if (unlikely(nents < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) dev_err(engine->dev, "Invalid number of dst SG.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) dma_unmap_sg(engine->dev, areq->dst, nents, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) dma_unmap_sg(engine->dev, areq->src, nents, DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) dma_pool_free(engine->req_pool, req->src_ddt, req->src_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) dma_pool_free(engine->req_pool, req->dst_ddt, req->dst_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) static void spacc_free_ddt(struct spacc_req *req, struct spacc_ddt *ddt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) dma_addr_t ddt_addr, struct scatterlist *payload,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) unsigned nbytes, enum dma_data_direction dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) int nents = sg_nents_for_len(payload, nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) if (nents < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) dev_err(req->engine->dev, "Invalid number of SG.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) dma_unmap_sg(req->engine->dev, payload, nents, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) dma_pool_free(req->engine->req_pool, ddt, ddt_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) unsigned int keylen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) struct crypto_authenc_keys keys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) crypto_aead_clear_flags(ctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) crypto_aead_set_flags(ctx->sw_cipher, crypto_aead_get_flags(tfm) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) CRYPTO_TFM_REQ_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) err = crypto_aead_setkey(ctx->sw_cipher, key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) goto badkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) if (keys.enckeylen > AES_MAX_KEY_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) goto badkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) if (keys.authkeylen > sizeof(ctx->hash_ctx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) goto badkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) memcpy(ctx->cipher_key, keys.enckey, keys.enckeylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) ctx->cipher_key_len = keys.enckeylen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) memcpy(ctx->hash_ctx, keys.authkey, keys.authkeylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) ctx->hash_key_len = keys.authkeylen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) memzero_explicit(&keys, sizeof(keys));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) badkey:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) memzero_explicit(&keys, sizeof(keys));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) }
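/*
 * The key passed in is an authenc() blob holding both the authentication and
 * encryption keys; crypto_authenc_extractkeys() splits it. The fallback tfm
 * is keyed with the full blob, while the hardware copies are kept separately
 * in cipher_key/hash_ctx so that either path can be used when the request is
 * actually submitted.
 */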
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) static int spacc_aead_setauthsize(struct crypto_aead *tfm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) unsigned int authsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) struct spacc_aead_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) return crypto_aead_setauthsize(ctx->sw_cipher, authsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) * Check if an AEAD request requires a fallback operation. Some requests can't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) * be completed in hardware because the hardware may not support certain key
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) * sizes. In these cases we need to complete the request in software.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) static int spacc_aead_need_fallback(struct aead_request *aead_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) struct aead_alg *alg = crypto_aead_alg(aead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) struct spacc_aead *spacc_alg = to_spacc_aead(alg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) struct spacc_aead_ctx *ctx = crypto_aead_ctx(aead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) * If we have an unsupported key length, then we need to do a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) * software fallback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) if ((spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) SPA_CTRL_CIPH_ALG_AES &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) ctx->cipher_key_len != AES_KEYSIZE_128 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) ctx->cipher_key_len != AES_KEYSIZE_256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) }
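/*
 * In practice this mainly catches AES-192 keys: the cipher engine handles
 * 128- and 256-bit AES keys, so anything else is pushed to the software
 * fallback (see also the comment in spacc_aes_setkey() below).
 */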
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) static int spacc_aead_do_fallback(struct aead_request *req, unsigned alg_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) bool is_encrypt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) struct crypto_tfm *old_tfm = crypto_aead_tfm(crypto_aead_reqtfm(req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) struct spacc_aead_ctx *ctx = crypto_tfm_ctx(old_tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) struct aead_request *subreq = aead_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) aead_request_set_tfm(subreq, ctx->sw_cipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) aead_request_set_callback(subreq, req->base.flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) req->base.complete, req->base.data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) req->iv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) aead_request_set_ad(subreq, req->assoclen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) return is_encrypt ? crypto_aead_encrypt(subreq) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) crypto_aead_decrypt(subreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) }
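/*
 * The fallback sub-request is carved out of this request's context area: see
 * spacc_aead_cra_init(), which sizes the reqsize as the larger of
 * struct spacc_req and the fallback's own request plus context.
 */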
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) static void spacc_aead_complete(struct spacc_req *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) spacc_aead_free_ddts(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) req->req->complete(req->req, req->result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) static int spacc_aead_submit(struct spacc_req *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) struct aead_request *aead_req =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) container_of(req->req, struct aead_request, base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) unsigned int authsize = crypto_aead_authsize(aead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) struct spacc_aead_ctx *ctx = crypto_aead_ctx(aead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) struct aead_alg *alg = crypto_aead_alg(aead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) struct spacc_aead *spacc_alg = to_spacc_aead(alg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) struct spacc_engine *engine = ctx->generic.engine;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) u32 ctrl, proc_len, assoc_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) req->result = -EINPROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->cipher_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) ctx->cipher_key_len, aead_req->iv, crypto_aead_ivsize(aead),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) ctx->hash_ctx, ctx->hash_key_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) /* Set the source and destination DDT pointers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) writel(0, engine->regs + SPA_OFFSET_REG_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) assoc_len = aead_req->assoclen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) proc_len = aead_req->cryptlen + assoc_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) * If we are decrypting, we need to take the length of the ICV out of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) * the processing length.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) if (!req->is_encrypt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) proc_len -= authsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) writel(proc_len, engine->regs + SPA_PROC_LEN_REG_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) writel(assoc_len, engine->regs + SPA_AAD_LEN_REG_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) writel(authsize, engine->regs + SPA_ICV_LEN_REG_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) (1 << SPA_CTRL_ICV_APPEND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) if (req->is_encrypt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) ctrl |= (1 << SPA_CTRL_ENCRYPT_IDX) | (1 << SPA_CTRL_AAD_COPY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) ctrl |= (1 << SPA_CTRL_KEY_EXP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) writel(ctrl, engine->regs + SPA_CTRL_REG_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) return -EINPROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) }
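/*
 * Summary of the submit sequence above: load the key/IV into a context page,
 * point the engine at the source/destination DDTs, program the length
 * registers, then the write to SPA_CTRL_REG_OFFSET starts processing. The
 * call always returns -EINPROGRESS; the result is reported later from the
 * completion path (status interrupt or packet timer, handled further down in
 * the driver).
 */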
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) static int spacc_req_submit(struct spacc_req *req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) static void spacc_push(struct spacc_engine *engine)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) struct spacc_req *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) while (!list_empty(&engine->pending) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) engine->in_flight + 1 <= engine->fifo_sz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) ++engine->in_flight;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) req = list_first_entry(&engine->pending, struct spacc_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) list_move_tail(&req->list, &engine->in_progress);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) req->result = spacc_req_submit(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) }
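/*
 * spacc_push() is called with engine->hw_lock held (see the call site in
 * spacc_aead_setup() below); in_flight counts commands handed to the engine
 * and is capped at fifo_sz, and is presumably decremented by the completion
 * path outside this excerpt.
 */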
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) * Setup an AEAD request for processing. This will configure the engine, load
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) * the context and then start the packet processing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) static int spacc_aead_setup(struct aead_request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) unsigned alg_type, bool is_encrypt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) struct crypto_aead *aead = crypto_aead_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) struct aead_alg *alg = crypto_aead_alg(aead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) struct spacc_engine *engine = to_spacc_aead(alg)->engine;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) struct spacc_req *dev_req = aead_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) dev_req->req = &req->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) dev_req->is_encrypt = is_encrypt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) dev_req->result = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) dev_req->engine = engine;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) dev_req->complete = spacc_aead_complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) if (unlikely(spacc_aead_need_fallback(req) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) ((err = spacc_aead_make_ddts(req)) == -E2BIG)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) return spacc_aead_do_fallback(req, alg_type, is_encrypt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) err = -EINPROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) spin_lock_irqsave(&engine->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) if (unlikely(spacc_fifo_cmd_full(engine)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) engine->in_flight + 1 > engine->fifo_sz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) err = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) spin_unlock_irqrestore(&engine->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) goto out_free_ddts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) list_add_tail(&dev_req->list, &engine->pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) list_add_tail(&dev_req->list, &engine->pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) spacc_push(engine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) spin_unlock_irqrestore(&engine->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) out_free_ddts:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) spacc_aead_free_ddts(dev_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) static int spacc_aead_encrypt(struct aead_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) struct crypto_aead *aead = crypto_aead_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) struct spacc_aead *alg = to_spacc_aead(crypto_aead_alg(aead));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) return spacc_aead_setup(req, alg->type, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) static int spacc_aead_decrypt(struct aead_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) struct crypto_aead *aead = crypto_aead_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) struct spacc_aead *alg = to_spacc_aead(crypto_aead_alg(aead));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) return spacc_aead_setup(req, alg->type, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) * Initialise a new AEAD context. This is responsible for allocating the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) * fallback cipher and initialising the context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) static int spacc_aead_cra_init(struct crypto_aead *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) struct aead_alg *alg = crypto_aead_alg(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) struct spacc_aead *spacc_alg = to_spacc_aead(alg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) struct spacc_engine *engine = spacc_alg->engine;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) ctx->generic.flags = spacc_alg->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) ctx->generic.engine = engine;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) ctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) CRYPTO_ALG_NEED_FALLBACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) if (IS_ERR(ctx->sw_cipher))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) return PTR_ERR(ctx->sw_cipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) ctx->generic.key_offs = spacc_alg->key_offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) ctx->generic.iv_offs = spacc_alg->iv_offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) crypto_aead_set_reqsize(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) tfm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) max(sizeof(struct spacc_req),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) sizeof(struct aead_request) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) crypto_aead_reqsize(ctx->sw_cipher)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) * Destructor for an AEAD context. This is called when the transform is freed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) * and must free the fallback cipher.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) static void spacc_aead_cra_exit(struct crypto_aead *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) crypto_free_aead(ctx->sw_cipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) * Set the DES key for a block cipher transform. This also performs weak key
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) * checking if the transform has requested it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) static int spacc_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) struct spacc_ablk_ctx *ctx = crypto_skcipher_ctx(cipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) err = verify_skcipher_des_key(cipher, key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) memcpy(ctx->key, key, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) ctx->key_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) * Set the 3DES key for a block cipher transform. This also performs weak key
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) * checking if the transform has requested it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) static int spacc_des3_setkey(struct crypto_skcipher *cipher, const u8 *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) struct spacc_ablk_ctx *ctx = crypto_skcipher_ctx(cipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) err = verify_skcipher_des3_key(cipher, key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) memcpy(ctx->key, key, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) ctx->key_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) * Set the key for an AES block cipher. Some key lengths are not supported in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) * hardware so this must also check whether a fallback is needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) static int spacc_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) if (len > AES_MAX_KEY_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) * IPSec engine only supports 128 and 256 bit AES keys. If we get a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) * request for any other size (192 bits) then we need to do a software
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) * fallback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) if (!ctx->sw_cipher)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) * Set the fallback transform to use the same request flags as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) * the hardware transform.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) crypto_skcipher_clear_flags(ctx->sw_cipher,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) CRYPTO_TFM_REQ_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) crypto_skcipher_set_flags(ctx->sw_cipher,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) cipher->base.crt_flags &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) CRYPTO_TFM_REQ_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) err = crypto_skcipher_setkey(ctx->sw_cipher, key, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) goto sw_setkey_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) memcpy(ctx->key, key, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) ctx->key_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) sw_setkey_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
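/*
 * Set the key for a KASUMI f8 transform. The key is stashed in the context
 * and loaded into the engine when a request is submitted.
 */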
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) static int spacc_kasumi_f8_setkey(struct crypto_skcipher *cipher,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) const u8 *key, unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) if (len > AES_MAX_KEY_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) memcpy(ctx->key, key, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) ctx->key_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
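/*
 * Return true if the request must be handled by the software fallback, i.e.
 * this is an AES transform with a key length (192 bits) that the hardware
 * does not support.
 */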
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) static int spacc_ablk_need_fallback(struct spacc_req *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) struct skcipher_request *ablk_req = skcipher_request_cast(req->req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(ablk_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) struct spacc_alg *spacc_alg = to_spacc_skcipher(crypto_skcipher_alg(tfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) struct spacc_ablk_ctx *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) ctx = crypto_skcipher_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) return (spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) SPA_CTRL_CIPH_ALG_AES &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) ctx->key_len != AES_KEYSIZE_128 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) ctx->key_len != AES_KEYSIZE_256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
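/*
 * Completion handler for a block cipher request: unmap and free the DDTs
 * and then complete the request towards the crypto API.
 */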
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) static void spacc_ablk_complete(struct spacc_req *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) struct skcipher_request *ablk_req = skcipher_request_cast(req->req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
	if (ablk_req->src != ablk_req->dst) {
		spacc_free_ddt(req, req->src_ddt, req->src_addr, ablk_req->src,
			       ablk_req->cryptlen, DMA_TO_DEVICE);
		spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,
			       ablk_req->cryptlen, DMA_FROM_DEVICE);
	} else {
		spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,
			       ablk_req->cryptlen, DMA_BIDIRECTIONAL);
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) req->req->complete(req->req, req->result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
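/*
 * Submit a block cipher request to the engine: load the key and IV into a
 * context RAM slot, program the source/destination DDT pointers and the
 * processing length, then write the control word to start the operation.
 */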
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) static int spacc_ablk_submit(struct spacc_req *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) struct skcipher_request *ablk_req = skcipher_request_cast(req->req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(ablk_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) struct spacc_alg *spacc_alg = to_spacc_skcipher(alg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) struct spacc_ablk_ctx *ctx = crypto_skcipher_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) struct spacc_engine *engine = ctx->generic.engine;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) u32 ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) ctx->key_len, ablk_req->iv, alg->ivsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) writel(0, engine->regs + SPA_OFFSET_REG_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) writel(ablk_req->cryptlen, engine->regs + SPA_PROC_LEN_REG_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) writel(0, engine->regs + SPA_AAD_LEN_REG_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) (req->is_encrypt ? (1 << SPA_CTRL_ENCRYPT_IDX) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) (1 << SPA_CTRL_KEY_EXP));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) writel(ctrl, engine->regs + SPA_CTRL_REG_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) return -EINPROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) static int spacc_ablk_do_fallback(struct skcipher_request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) unsigned alg_type, bool is_encrypt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) struct crypto_tfm *old_tfm =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) crypto_skcipher_tfm(crypto_skcipher_reqtfm(req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(old_tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) struct spacc_req *dev_req = skcipher_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
	/*
	 * Build a request on the software fallback transform with the same
	 * parameters and completion callback as the original request, and
	 * hand the ciphering off to the software implementation.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) skcipher_request_set_tfm(&dev_req->fallback_req, ctx->sw_cipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) skcipher_request_set_callback(&dev_req->fallback_req, req->base.flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) req->base.complete, req->base.data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) skcipher_request_set_crypt(&dev_req->fallback_req, req->src, req->dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) req->cryptlen, req->iv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) err = is_encrypt ? crypto_skcipher_encrypt(&dev_req->fallback_req) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) crypto_skcipher_decrypt(&dev_req->fallback_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
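/*
 * Common setup for encrypt and decrypt requests: punt unsupported key sizes
 * to the software fallback, map the source and destination scatterlists
 * into DDTs and queue the request for the engine, backlogging it if the
 * command FIFO is currently full.
 */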
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) static int spacc_ablk_setup(struct skcipher_request *req, unsigned alg_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) bool is_encrypt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) struct spacc_engine *engine = to_spacc_skcipher(alg)->engine;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) struct spacc_req *dev_req = skcipher_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) int err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) dev_req->req = &req->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) dev_req->is_encrypt = is_encrypt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) dev_req->engine = engine;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) dev_req->complete = spacc_ablk_complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) dev_req->result = -EINPROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) if (unlikely(spacc_ablk_need_fallback(dev_req)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) return spacc_ablk_do_fallback(req, alg_type, is_encrypt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
	/*
	 * Create the DDTs for the engine. If the source and destination are
	 * the same then we can optimize by reusing a single, bidirectionally
	 * mapped DDT.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) if (req->src != req->dst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) dev_req->src_ddt = spacc_sg_to_ddt(engine, req->src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) req->cryptlen, DMA_TO_DEVICE, &dev_req->src_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) if (!dev_req->src_ddt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) req->cryptlen, DMA_FROM_DEVICE, &dev_req->dst_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) if (!dev_req->dst_ddt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) goto out_free_src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) req->cryptlen, DMA_BIDIRECTIONAL, &dev_req->dst_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) if (!dev_req->dst_ddt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) dev_req->src_ddt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) dev_req->src_addr = dev_req->dst_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) err = -EINPROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) spin_lock_irqsave(&engine->hw_lock, flags);
	/*
	 * Check if the engine will accept the operation now. If it won't
	 * then we either stick it on the end of the pending list if we can
	 * backlog, or bail out with -EBUSY if we can't.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) if (unlikely(spacc_fifo_cmd_full(engine)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) engine->in_flight + 1 > engine->fifo_sz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) err = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) spin_unlock_irqrestore(&engine->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) goto out_free_ddts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) list_add_tail(&dev_req->list, &engine->pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) list_add_tail(&dev_req->list, &engine->pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) spacc_push(engine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) spin_unlock_irqrestore(&engine->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) out_free_ddts:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) spacc_free_ddt(dev_req, dev_req->dst_ddt, dev_req->dst_addr, req->dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) req->cryptlen, req->src == req->dst ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) DMA_BIDIRECTIONAL : DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) out_free_src:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) if (req->src != req->dst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) spacc_free_ddt(dev_req, dev_req->src_ddt, dev_req->src_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) req->src, req->cryptlen, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
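/*
 * Initialise a block cipher transform: record the owning engine, allocate a
 * software fallback cipher when the algorithm needs one, and size the
 * request context to hold the fallback request.
 */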
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) static int spacc_ablk_init_tfm(struct crypto_skcipher *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) struct spacc_ablk_ctx *ctx = crypto_skcipher_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) struct spacc_alg *spacc_alg = to_spacc_skcipher(alg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) struct spacc_engine *engine = spacc_alg->engine;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) ctx->generic.flags = spacc_alg->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) ctx->generic.engine = engine;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) if (alg->base.cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) ctx->sw_cipher = crypto_alloc_skcipher(alg->base.cra_name, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) CRYPTO_ALG_NEED_FALLBACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) if (IS_ERR(ctx->sw_cipher)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) dev_warn(engine->dev, "failed to allocate fallback for %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) alg->base.cra_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) return PTR_ERR(ctx->sw_cipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) crypto_skcipher_set_reqsize(tfm, sizeof(struct spacc_req) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) crypto_skcipher_reqsize(ctx->sw_cipher));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) /* take the size without the fallback skcipher_request at the end */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) crypto_skcipher_set_reqsize(tfm, offsetof(struct spacc_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) fallback_req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) ctx->generic.key_offs = spacc_alg->key_offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) ctx->generic.iv_offs = spacc_alg->iv_offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) static void spacc_ablk_exit_tfm(struct crypto_skcipher *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) struct spacc_ablk_ctx *ctx = crypto_skcipher_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) crypto_free_skcipher(ctx->sw_cipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) static int spacc_ablk_encrypt(struct skcipher_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) struct skcipher_alg *alg = crypto_skcipher_alg(cipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) struct spacc_alg *spacc_alg = to_spacc_skcipher(alg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) return spacc_ablk_setup(req, spacc_alg->type, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) static int spacc_ablk_decrypt(struct skcipher_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) struct skcipher_alg *alg = crypto_skcipher_alg(cipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) struct spacc_alg *spacc_alg = to_spacc_skcipher(alg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) return spacc_ablk_setup(req, spacc_alg->type, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
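/* Return non-zero if the engine's status FIFO has no completed packets. */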
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) static inline int spacc_fifo_stat_empty(struct spacc_engine *engine)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) return readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) SPA_FIFO_STAT_EMPTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
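/*
 * Drain the status FIFO: move each completed request from the in-progress
 * list to the completed list, pop its status word and convert the hardware
 * result into an errno value, then schedule the completion tasklet.
 */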
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) static void spacc_process_done(struct spacc_engine *engine)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) struct spacc_req *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) spin_lock_irqsave(&engine->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) while (!spacc_fifo_stat_empty(engine)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) req = list_first_entry(&engine->in_progress, struct spacc_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) list_move_tail(&req->list, &engine->completed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) --engine->in_flight;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) /* POP the status register. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) writel(~0, engine->regs + SPA_STAT_POP_REG_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) req->result = (readl(engine->regs + SPA_STATUS_REG_OFFSET) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) SPA_STATUS_RES_CODE_MASK) >> SPA_STATUS_RES_CODE_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) * Convert the SPAcc error status into the standard POSIX error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) * codes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) if (unlikely(req->result)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) switch (req->result) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) case SPA_STATUS_ICV_FAIL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) req->result = -EBADMSG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) case SPA_STATUS_MEMORY_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) dev_warn(engine->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) "memory error triggered\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) req->result = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) case SPA_STATUS_BLOCK_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) dev_warn(engine->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) "block error triggered\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) req->result = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) tasklet_schedule(&engine->complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) spin_unlock_irqrestore(&engine->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
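/*
 * Interrupt handler: acknowledge the engine's interrupt sources by writing
 * the status back and reap any completed packets from the status FIFO.
 */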
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) static irqreturn_t spacc_spacc_irq(int irq, void *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) struct spacc_engine *engine = (struct spacc_engine *)dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) u32 spacc_irq_stat = readl(engine->regs + SPA_IRQ_STAT_REG_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) writel(spacc_irq_stat, engine->regs + SPA_IRQ_STAT_REG_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) spacc_process_done(engine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
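/* Packet timeout callback: poll the engine for any completed packets. */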
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) static void spacc_packet_timeout(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) struct spacc_engine *engine = from_timer(engine, t, packet_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) spacc_process_done(engine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
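/*
 * Submit a queued request to the hardware, dispatching to the AEAD or block
 * cipher submission path depending on the algorithm type.
 */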
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) static int spacc_req_submit(struct spacc_req *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) struct crypto_alg *alg = req->req->tfm->__crt_alg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) if (CRYPTO_ALG_TYPE_AEAD == (CRYPTO_ALG_TYPE_MASK & alg->cra_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) return spacc_aead_submit(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) return spacc_ablk_submit(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
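/*
 * Completion tasklet. Push any pending requests into the command FIFO and
 * then run the completion callbacks for finished requests outside of the
 * hardware lock.
 */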
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) static void spacc_spacc_complete(unsigned long data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) struct spacc_engine *engine = (struct spacc_engine *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) struct spacc_req *req, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) LIST_HEAD(completed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) spin_lock_irqsave(&engine->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) list_splice_init(&engine->completed, &completed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) spacc_push(engine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) if (engine->in_flight)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) spin_unlock_irqrestore(&engine->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) list_for_each_entry_safe(req, tmp, &completed, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) list_del(&req->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) req->complete(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) #ifdef CONFIG_PM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) static int spacc_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) struct spacc_engine *engine = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
	/*
	 * We only support standby mode. All we have to do is gate the clock
	 * to the SPAcc. The hardware will preserve its state until the clock
	 * is re-enabled on resume.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) clk_disable(engine->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) static int spacc_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) struct spacc_engine *engine = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) return clk_enable(engine->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) static const struct dev_pm_ops spacc_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) .suspend = spacc_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) .resume = spacc_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) #endif /* CONFIG_PM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) static inline struct spacc_engine *spacc_dev_to_engine(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) return dev ? dev_get_drvdata(dev) : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
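/*
 * sysfs interface for the STAT_CNT interrupt threshold. Reading returns the
 * current threshold; writing clamps the new value to [1, fifo_sz - 1] and
 * programs it into the IRQ control register.
 */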
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) static ssize_t spacc_stat_irq_thresh_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) struct spacc_engine *engine = spacc_dev_to_engine(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) return snprintf(buf, PAGE_SIZE, "%u\n", engine->stat_irq_thresh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) static ssize_t spacc_stat_irq_thresh_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) const char *buf, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) struct spacc_engine *engine = spacc_dev_to_engine(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) unsigned long thresh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) if (kstrtoul(buf, 0, &thresh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) thresh = clamp(thresh, 1UL, engine->fifo_sz - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) engine->stat_irq_thresh = thresh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) writel(engine->stat_irq_thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) engine->regs + SPA_IRQ_CTRL_REG_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) static DEVICE_ATTR(stat_irq_thresh, 0644, spacc_stat_irq_thresh_show,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) spacc_stat_irq_thresh_store);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)
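/* Block cipher algorithms exposed by the IPSEC variant of the engine. */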
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) static struct spacc_alg ipsec_engine_algs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) .key_offs = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) .iv_offs = AES_MAX_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) .alg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) .base.cra_name = "cbc(aes)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) .base.cra_driver_name = "cbc-aes-picoxcell",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) CRYPTO_ALG_ALLOCATES_MEMORY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) CRYPTO_ALG_NEED_FALLBACK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) .base.cra_blocksize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) .base.cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) .setkey = spacc_aes_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) .encrypt = spacc_ablk_encrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) .decrypt = spacc_ablk_decrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) .min_keysize = AES_MIN_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) .max_keysize = AES_MAX_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) .ivsize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) .init = spacc_ablk_init_tfm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) .exit = spacc_ablk_exit_tfm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) .key_offs = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) .iv_offs = AES_MAX_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_ECB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) .alg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) .base.cra_name = "ecb(aes)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) .base.cra_driver_name = "ecb-aes-picoxcell",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) CRYPTO_ALG_ALLOCATES_MEMORY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) CRYPTO_ALG_NEED_FALLBACK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) .base.cra_blocksize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) .base.cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) .setkey = spacc_aes_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) .encrypt = spacc_ablk_encrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) .decrypt = spacc_ablk_decrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) .min_keysize = AES_MIN_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) .max_keysize = AES_MAX_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) .init = spacc_ablk_init_tfm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) .exit = spacc_ablk_exit_tfm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) .key_offs = DES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) .iv_offs = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) .alg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) .base.cra_name = "cbc(des)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) .base.cra_driver_name = "cbc-des-picoxcell",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) CRYPTO_ALG_ALLOCATES_MEMORY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) .base.cra_blocksize = DES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) .base.cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) .setkey = spacc_des_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) .encrypt = spacc_ablk_encrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) .decrypt = spacc_ablk_decrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) .min_keysize = DES_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) .max_keysize = DES_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) .ivsize = DES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) .init = spacc_ablk_init_tfm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) .exit = spacc_ablk_exit_tfm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) .key_offs = DES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) .iv_offs = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) .alg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) .base.cra_name = "ecb(des)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) .base.cra_driver_name = "ecb-des-picoxcell",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) CRYPTO_ALG_ALLOCATES_MEMORY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) .base.cra_blocksize = DES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) .base.cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) .setkey = spacc_des_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) .encrypt = spacc_ablk_encrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) .decrypt = spacc_ablk_decrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) .min_keysize = DES_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) .max_keysize = DES_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) .init = spacc_ablk_init_tfm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) .exit = spacc_ablk_exit_tfm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) .key_offs = DES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) .iv_offs = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) .alg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) .base.cra_name = "cbc(des3_ede)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) .base.cra_driver_name = "cbc-des3-ede-picoxcell",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) .base.cra_flags = CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) CRYPTO_ALG_ALLOCATES_MEMORY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) CRYPTO_ALG_KERN_DRIVER_ONLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) .base.cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) .setkey = spacc_des3_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) .encrypt = spacc_ablk_encrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) .decrypt = spacc_ablk_decrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) .min_keysize = DES3_EDE_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) .max_keysize = DES3_EDE_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) .ivsize = DES3_EDE_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) .init = spacc_ablk_init_tfm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) .exit = spacc_ablk_exit_tfm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) .key_offs = DES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) .iv_offs = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) .alg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) .base.cra_name = "ecb(des3_ede)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) .base.cra_driver_name = "ecb-des3-ede-picoxcell",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) .base.cra_flags = CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) CRYPTO_ALG_ALLOCATES_MEMORY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) CRYPTO_ALG_KERN_DRIVER_ONLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) .base.cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) .setkey = spacc_des3_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) .encrypt = spacc_ablk_encrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) .decrypt = spacc_ablk_decrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) .min_keysize = DES3_EDE_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) .max_keysize = DES3_EDE_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) .init = spacc_ablk_init_tfm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) .exit = spacc_ablk_exit_tfm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)
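/* AEAD (authenc) algorithms exposed by the IPSEC variant of the engine. */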
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) static struct spacc_aead ipsec_engine_aeads[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) .ctrl_default = SPA_CTRL_CIPH_ALG_AES |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) SPA_CTRL_CIPH_MODE_CBC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) SPA_CTRL_HASH_ALG_SHA |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) SPA_CTRL_HASH_MODE_HMAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) .key_offs = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) .iv_offs = AES_MAX_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) .alg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) .cra_name = "authenc(hmac(sha1),cbc(aes))",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) .cra_driver_name = "authenc-hmac-sha1-"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) "cbc-aes-picoxcell",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) .cra_flags = CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) CRYPTO_ALG_ALLOCATES_MEMORY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) CRYPTO_ALG_NEED_FALLBACK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) CRYPTO_ALG_KERN_DRIVER_ONLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) .cra_blocksize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) .cra_ctxsize = sizeof(struct spacc_aead_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) .cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) .setkey = spacc_aead_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) .setauthsize = spacc_aead_setauthsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) .encrypt = spacc_aead_encrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) .decrypt = spacc_aead_decrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) .ivsize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) .maxauthsize = SHA1_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) .init = spacc_aead_cra_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) .exit = spacc_aead_cra_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) .ctrl_default = SPA_CTRL_CIPH_ALG_AES |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) SPA_CTRL_CIPH_MODE_CBC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) SPA_CTRL_HASH_ALG_SHA256 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) SPA_CTRL_HASH_MODE_HMAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) .key_offs = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) .iv_offs = AES_MAX_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) .alg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) .cra_name = "authenc(hmac(sha256),cbc(aes))",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) .cra_driver_name = "authenc-hmac-sha256-"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) "cbc-aes-picoxcell",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) .cra_flags = CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) CRYPTO_ALG_ALLOCATES_MEMORY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) CRYPTO_ALG_NEED_FALLBACK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) CRYPTO_ALG_KERN_DRIVER_ONLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) .cra_blocksize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) .cra_ctxsize = sizeof(struct spacc_aead_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) .cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) .setkey = spacc_aead_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) .setauthsize = spacc_aead_setauthsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) .encrypt = spacc_aead_encrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) .decrypt = spacc_aead_decrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) .ivsize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) .maxauthsize = SHA256_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) .init = spacc_aead_cra_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) .exit = spacc_aead_cra_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) .key_offs = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) .iv_offs = AES_MAX_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) .ctrl_default = SPA_CTRL_CIPH_ALG_AES |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) SPA_CTRL_CIPH_MODE_CBC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) SPA_CTRL_HASH_ALG_MD5 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) SPA_CTRL_HASH_MODE_HMAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) .alg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) .cra_name = "authenc(hmac(md5),cbc(aes))",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) .cra_driver_name = "authenc-hmac-md5-"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) "cbc-aes-picoxcell",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) .cra_flags = CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) CRYPTO_ALG_ALLOCATES_MEMORY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) CRYPTO_ALG_NEED_FALLBACK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) CRYPTO_ALG_KERN_DRIVER_ONLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) .cra_blocksize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) .cra_ctxsize = sizeof(struct spacc_aead_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) .cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) .setkey = spacc_aead_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) .setauthsize = spacc_aead_setauthsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) .encrypt = spacc_aead_encrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) .decrypt = spacc_aead_decrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) .ivsize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) .maxauthsize = MD5_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) .init = spacc_aead_cra_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) .exit = spacc_aead_cra_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) .key_offs = DES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) .iv_offs = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) .ctrl_default = SPA_CTRL_CIPH_ALG_DES |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) SPA_CTRL_CIPH_MODE_CBC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) SPA_CTRL_HASH_ALG_SHA |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) SPA_CTRL_HASH_MODE_HMAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) .alg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) .cra_driver_name = "authenc-hmac-sha1-"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) "cbc-3des-picoxcell",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) .cra_flags = CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) CRYPTO_ALG_ALLOCATES_MEMORY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) CRYPTO_ALG_NEED_FALLBACK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) CRYPTO_ALG_KERN_DRIVER_ONLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) .cra_blocksize = DES3_EDE_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) .cra_ctxsize = sizeof(struct spacc_aead_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) .cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) .setkey = spacc_aead_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) .setauthsize = spacc_aead_setauthsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) .encrypt = spacc_aead_encrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) .decrypt = spacc_aead_decrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) .ivsize = DES3_EDE_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) .maxauthsize = SHA1_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) .init = spacc_aead_cra_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) .exit = spacc_aead_cra_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) .key_offs = DES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) .iv_offs = 0,
		.ctrl_default = SPA_CTRL_CIPH_ALG_DES |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) SPA_CTRL_CIPH_MODE_CBC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) SPA_CTRL_HASH_ALG_SHA256 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) SPA_CTRL_HASH_MODE_HMAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) .alg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) .cra_name = "authenc(hmac(sha256),"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) "cbc(des3_ede))",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) .cra_driver_name = "authenc-hmac-sha256-"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) "cbc-3des-picoxcell",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) .cra_flags = CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) CRYPTO_ALG_ALLOCATES_MEMORY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) CRYPTO_ALG_NEED_FALLBACK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) CRYPTO_ALG_KERN_DRIVER_ONLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) .cra_blocksize = DES3_EDE_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) .cra_ctxsize = sizeof(struct spacc_aead_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) .cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) .setkey = spacc_aead_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) .setauthsize = spacc_aead_setauthsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) .encrypt = spacc_aead_encrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) .decrypt = spacc_aead_decrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) .ivsize = DES3_EDE_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) .maxauthsize = SHA256_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) .init = spacc_aead_cra_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) .exit = spacc_aead_cra_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) .key_offs = DES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) .iv_offs = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) .ctrl_default = SPA_CTRL_CIPH_ALG_DES |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) SPA_CTRL_CIPH_MODE_CBC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) SPA_CTRL_HASH_ALG_MD5 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) SPA_CTRL_HASH_MODE_HMAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) .alg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) .cra_driver_name = "authenc-hmac-md5-"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) "cbc-3des-picoxcell",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) .cra_flags = CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) CRYPTO_ALG_ALLOCATES_MEMORY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) CRYPTO_ALG_NEED_FALLBACK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) CRYPTO_ALG_KERN_DRIVER_ONLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) .cra_blocksize = DES3_EDE_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) .cra_ctxsize = sizeof(struct spacc_aead_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) .cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) .setkey = spacc_aead_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) .setauthsize = spacc_aead_setauthsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) .encrypt = spacc_aead_encrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) .decrypt = spacc_aead_decrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) .ivsize = DES3_EDE_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) .maxauthsize = MD5_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) .init = spacc_aead_cra_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) .exit = spacc_aead_cra_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
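/*
 * Algorithms exposed by the layer-2 ("picochip,spacc-l2") variant of the
 * engine. This variant only offers KASUMI in f8 mode (the 3GPP air-interface
 * confidentiality algorithm); the AES/DES and authenc algorithms above are
 * registered by the IPsec variant.
 */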
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) static struct spacc_alg l2_engine_algs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) .key_offs = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) .iv_offs = SPACC_CRYPTO_KASUMI_F8_KEY_LEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) .ctrl_default = SPA_CTRL_CIPH_ALG_KASUMI |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) SPA_CTRL_CIPH_MODE_F8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) .alg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) .base.cra_name = "f8(kasumi)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) .base.cra_driver_name = "f8-kasumi-picoxcell",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) .base.cra_flags = CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) CRYPTO_ALG_ALLOCATES_MEMORY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) CRYPTO_ALG_KERN_DRIVER_ONLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) .base.cra_blocksize = 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) .base.cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) .setkey = spacc_kasumi_f8_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) .encrypt = spacc_ablk_encrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) .decrypt = spacc_ablk_decrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) .min_keysize = 16,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) .max_keysize = 16,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) .ivsize = 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) .init = spacc_ablk_init_tfm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) .exit = spacc_ablk_exit_tfm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) };
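
/*
 * Illustrative only (not part of the driver): once registered, these
 * transforms are consumed through the normal kernel crypto API. A
 * hypothetical in-kernel user of the KASUMI f8 cipher might do roughly:
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("f8(kasumi)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_skcipher_setkey(tfm, key, 16);
 *	// ...build an skcipher_request and call crypto_skcipher_encrypt()...
 *	crypto_free_skcipher(tfm);
 *
 * Because the algorithms are flagged CRYPTO_ALG_ASYNC, completion is
 * normally reported through the request's callback rather than
 * synchronously.
 */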
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) #ifdef CONFIG_OF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) static const struct of_device_id spacc_of_id_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) { .compatible = "picochip,spacc-ipsec" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) { .compatible = "picochip,spacc-l2" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) MODULE_DEVICE_TABLE(of, spacc_of_id_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) #endif /* CONFIG_OF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)
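/*
 * devm action callback: make sure the completion tasklet has finished and
 * cannot be rescheduled again before the remaining device-managed resources
 * are released on probe failure or device removal.
 */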
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) static void spacc_tasklet_kill(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) tasklet_kill(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)
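/*
 * Probe: pick the per-variant limits and algorithm tables from the
 * compatible string, map the engine registers, claim the IRQ and reference
 * clock, then register the skcipher and AEAD algorithms. Probing succeeds
 * as long as at least one algorithm registers.
 */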
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) static int spacc_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) int i, err, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) struct resource *irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) struct device_node *np = pdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) struct spacc_engine *engine = devm_kzalloc(&pdev->dev, sizeof(*engine),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) if (!engine)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) if (of_device_is_compatible(np, "picochip,spacc-ipsec")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) engine->max_ctxs = SPACC_CRYPTO_IPSEC_MAX_CTXS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) engine->cipher_pg_sz = SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) engine->hash_pg_sz = SPACC_CRYPTO_IPSEC_HASH_PG_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) engine->fifo_sz = SPACC_CRYPTO_IPSEC_FIFO_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) engine->algs = ipsec_engine_algs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) engine->num_algs = ARRAY_SIZE(ipsec_engine_algs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) engine->aeads = ipsec_engine_aeads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) engine->num_aeads = ARRAY_SIZE(ipsec_engine_aeads);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) } else if (of_device_is_compatible(np, "picochip,spacc-l2")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) engine->max_ctxs = SPACC_CRYPTO_L2_MAX_CTXS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) engine->cipher_pg_sz = SPACC_CRYPTO_L2_CIPHER_PG_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) engine->hash_pg_sz = SPACC_CRYPTO_L2_HASH_PG_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) engine->fifo_sz = SPACC_CRYPTO_L2_FIFO_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) engine->algs = l2_engine_algs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) engine->num_algs = ARRAY_SIZE(l2_engine_algs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) engine->name = dev_name(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) engine->regs = devm_platform_ioremap_resource(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) if (IS_ERR(engine->regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) return PTR_ERR(engine->regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) if (!irq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) dev_err(&pdev->dev, "no IRQ resource for engine\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) tasklet_init(&engine->complete, spacc_spacc_complete,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) (unsigned long)engine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) ret = devm_add_action(&pdev->dev, spacc_tasklet_kill,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) &engine->complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) if (devm_request_irq(&pdev->dev, irq->start, spacc_spacc_irq, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) engine->name, engine)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) dev_err(&pdev->dev, "failed to request IRQ\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) engine->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) engine->cipher_ctx_base = engine->regs + SPA_CIPH_KEY_BASE_REG_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) engine->hash_key_base = engine->regs + SPA_HASH_KEY_BASE_REG_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) engine->req_pool = dmam_pool_create(engine->name, engine->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) MAX_DDT_LEN * sizeof(struct spacc_ddt), 8, SZ_64K);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) if (!engine->req_pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) spin_lock_init(&engine->hw_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) engine->clk = clk_get(&pdev->dev, "ref");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) if (IS_ERR(engine->clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) dev_info(&pdev->dev, "clk unavailable\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) return PTR_ERR(engine->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) if (clk_prepare_enable(engine->clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) dev_info(&pdev->dev, "unable to prepare/enable clk\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) goto err_clk_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) * Use an IRQ threshold of 50% as a default. This seems to be a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) * reasonable trade off of latency against throughput but can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) * changed at runtime via the stat_irq_thresh sysfs attribute created below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) engine->stat_irq_thresh = (engine->fifo_sz / 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) goto err_clk_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) * Configure the interrupts. We only use the STAT_CNT interrupt as we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) * only submit a new packet for processing when we complete another in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) * the queue. This minimizes time spent in the interrupt handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) writel(engine->stat_irq_thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) engine->regs + SPA_IRQ_CTRL_REG_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) writel(SPA_IRQ_EN_STAT_EN | SPA_IRQ_EN_GLBL_EN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) engine->regs + SPA_IRQ_EN_REG_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) timer_setup(&engine->packet_timeout, spacc_packet_timeout, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) INIT_LIST_HEAD(&engine->pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) INIT_LIST_HEAD(&engine->completed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) INIT_LIST_HEAD(&engine->in_progress);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) engine->in_flight = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) platform_set_drvdata(pdev, engine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717)
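/*
 * The registration loops below treat the probe as successful if at least
 * one skcipher or AEAD registers: ret is cleared on the first success and
 * only the failing algorithms are reported.
 */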
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) INIT_LIST_HEAD(&engine->registered_algs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) for (i = 0; i < engine->num_algs; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) engine->algs[i].engine = engine;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) err = crypto_register_skcipher(&engine->algs[i].alg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) list_add_tail(&engine->algs[i].entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) &engine->registered_algs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) dev_err(engine->dev, "failed to register alg \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) engine->algs[i].alg.base.cra_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) dev_dbg(engine->dev, "registered alg \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) engine->algs[i].alg.base.cra_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) INIT_LIST_HEAD(&engine->registered_aeads);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) for (i = 0; i < engine->num_aeads; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) engine->aeads[i].engine = engine;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) err = crypto_register_aead(&engine->aeads[i].alg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) list_add_tail(&engine->aeads[i].entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) &engine->registered_aeads);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) dev_err(engine->dev, "failed to register alg \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) engine->aeads[i].alg.base.cra_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) dev_dbg(engine->dev, "registered alg \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) engine->aeads[i].alg.base.cra_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) del_timer_sync(&engine->packet_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) err_clk_disable:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) clk_disable_unprepare(engine->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) err_clk_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) clk_put(engine->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765)
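/*
 * Device removal: stop the packet timeout timer, drop the sysfs threshold
 * attribute, unregister everything that made it onto the registered lists
 * during probe, and release the reference clock. devm tears down the
 * register mapping, IRQ, completion tasklet and DMA pool.
 */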
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) static int spacc_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) struct spacc_aead *aead, *an;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) struct spacc_alg *alg, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) struct spacc_engine *engine = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) del_timer_sync(&engine->packet_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) list_for_each_entry_safe(aead, an, &engine->registered_aeads, entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) list_del(&aead->entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) crypto_unregister_aead(&aead->alg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) list_for_each_entry_safe(alg, next, &engine->registered_algs, entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) list_del(&alg->entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) crypto_unregister_skcipher(&alg->alg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) clk_disable_unprepare(engine->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) clk_put(engine->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) static struct platform_driver spacc_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) .probe = spacc_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) .remove = spacc_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) .name = "picochip,spacc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) #ifdef CONFIG_PM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) .pm = &spacc_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) #endif /* CONFIG_PM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) .of_match_table = of_match_ptr(spacc_of_id_table),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) module_platform_driver(spacc_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) MODULE_AUTHOR("Jamie Iles");