^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Cryptographic API.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Support for ATMEL SHA1/SHA256 HW acceleration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Copyright (c) 2012 Eukréa Electromatique - ATMEL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * Author: Nicolas Royer <nicolas@eukrea.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * Some ideas are from omap-sham.c drivers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/err.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/clk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/hw_random.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/dmaengine.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <linux/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <linux/scatterlist.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <linux/of_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include <linux/crypto.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #include <crypto/scatterwalk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #include <crypto/algapi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #include <crypto/sha.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #include <crypto/hash.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #include <crypto/internal/hash.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #include "atmel-sha-regs.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #include "atmel-authenc.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #define ATMEL_SHA_PRIORITY 300
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) /* SHA flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #define SHA_FLAGS_BUSY BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) #define SHA_FLAGS_FINAL BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) #define SHA_FLAGS_DMA_ACTIVE BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #define SHA_FLAGS_OUTPUT_READY BIT(3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) #define SHA_FLAGS_INIT BIT(4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #define SHA_FLAGS_CPU BIT(5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) #define SHA_FLAGS_DMA_READY BIT(6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) #define SHA_FLAGS_DUMP_REG BIT(7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) /* bits[11:8] are reserved. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) #define SHA_FLAGS_FINUP BIT(16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) #define SHA_FLAGS_SG BIT(17)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) #define SHA_FLAGS_ERROR BIT(23)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) #define SHA_FLAGS_PAD BIT(24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) #define SHA_FLAGS_RESTORE BIT(25)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) #define SHA_FLAGS_IDATAR0 BIT(26)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) #define SHA_FLAGS_WAIT_DATARDY BIT(27)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) #define SHA_OP_INIT 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) #define SHA_OP_UPDATE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) #define SHA_OP_FINAL 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) #define SHA_OP_DIGEST 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) #define SHA_BUFFER_LEN (PAGE_SIZE / 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) #define ATMEL_SHA_DMA_THRESHOLD 56
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) struct atmel_sha_caps {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) bool has_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) bool has_dualbuff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) bool has_sha224;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) bool has_sha_384_512;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) bool has_uihv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) bool has_hmac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) struct atmel_sha_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) * .statesize = sizeof(struct atmel_sha_reqctx) must be <= PAGE_SIZE / 8 as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) * tested by the ahash_prepare_alg() function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) struct atmel_sha_reqctx {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) struct atmel_sha_dev *dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) unsigned long op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) u8 digest[SHA512_DIGEST_SIZE] __aligned(sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) u64 digcnt[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) size_t bufcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) size_t buflen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) dma_addr_t dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) /* walk state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) unsigned int offset; /* offset in current sg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) unsigned int total; /* total request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) size_t block_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) size_t hash_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) u8 buffer[SHA_BUFFER_LEN + SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) typedef int (*atmel_sha_fn_t)(struct atmel_sha_dev *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) struct atmel_sha_ctx {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) struct atmel_sha_dev *dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) atmel_sha_fn_t start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) #define ATMEL_SHA_QUEUE_LENGTH 50
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) struct atmel_sha_dma {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) struct dma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) struct dma_slave_config dma_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) int nents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) unsigned int last_sg_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) struct atmel_sha_dev {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) struct list_head list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) unsigned long phys_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) struct clk *iclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) int irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) void __iomem *io_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) spinlock_t lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) struct tasklet_struct done_task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) struct tasklet_struct queue_task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) struct crypto_queue queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) struct ahash_request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) bool is_async;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) bool force_complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) atmel_sha_fn_t resume;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) atmel_sha_fn_t cpu_transfer_complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) struct atmel_sha_dma dma_lch_in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) struct atmel_sha_caps caps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) struct scatterlist tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) u32 hw_version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) struct atmel_sha_drv {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) struct list_head dev_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) spinlock_t lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) static struct atmel_sha_drv atmel_sha = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) .dev_list = LIST_HEAD_INIT(atmel_sha.dev_list),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) .lock = __SPIN_LOCK_UNLOCKED(atmel_sha.lock),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) #ifdef VERBOSE_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) static const char *atmel_sha_reg_name(u32 offset, char *tmp, size_t sz, bool wr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) switch (offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) case SHA_CR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) return "CR";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) case SHA_MR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) return "MR";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) case SHA_IER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) return "IER";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) case SHA_IDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) return "IDR";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) case SHA_IMR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) return "IMR";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) case SHA_ISR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) return "ISR";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) case SHA_MSR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) return "MSR";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) case SHA_BCR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) return "BCR";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) case SHA_REG_DIN(0):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) case SHA_REG_DIN(1):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) case SHA_REG_DIN(2):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) case SHA_REG_DIN(3):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) case SHA_REG_DIN(4):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) case SHA_REG_DIN(5):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) case SHA_REG_DIN(6):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) case SHA_REG_DIN(7):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) case SHA_REG_DIN(8):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) case SHA_REG_DIN(9):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) case SHA_REG_DIN(10):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) case SHA_REG_DIN(11):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) case SHA_REG_DIN(12):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) case SHA_REG_DIN(13):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) case SHA_REG_DIN(14):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) case SHA_REG_DIN(15):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) snprintf(tmp, sz, "IDATAR[%u]", (offset - SHA_REG_DIN(0)) >> 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) case SHA_REG_DIGEST(0):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) case SHA_REG_DIGEST(1):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) case SHA_REG_DIGEST(2):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) case SHA_REG_DIGEST(3):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) case SHA_REG_DIGEST(4):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) case SHA_REG_DIGEST(5):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) case SHA_REG_DIGEST(6):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) case SHA_REG_DIGEST(7):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) case SHA_REG_DIGEST(8):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) case SHA_REG_DIGEST(9):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) case SHA_REG_DIGEST(10):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) case SHA_REG_DIGEST(11):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) case SHA_REG_DIGEST(12):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) case SHA_REG_DIGEST(13):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) case SHA_REG_DIGEST(14):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) case SHA_REG_DIGEST(15):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) if (wr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) snprintf(tmp, sz, "IDATAR[%u]",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) 16u + ((offset - SHA_REG_DIGEST(0)) >> 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) snprintf(tmp, sz, "ODATAR[%u]",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) (offset - SHA_REG_DIGEST(0)) >> 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) case SHA_HW_VERSION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) return "HWVER";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) snprintf(tmp, sz, "0x%02x", offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) return tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) #endif /* VERBOSE_DEBUG */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) static inline u32 atmel_sha_read(struct atmel_sha_dev *dd, u32 offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) u32 value = readl_relaxed(dd->io_base + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) #ifdef VERBOSE_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) if (dd->flags & SHA_FLAGS_DUMP_REG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) char tmp[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) dev_vdbg(dd->dev, "read 0x%08x from %s\n", value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) atmel_sha_reg_name(offset, tmp, sizeof(tmp), false));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) #endif /* VERBOSE_DEBUG */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) return value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) static inline void atmel_sha_write(struct atmel_sha_dev *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) u32 offset, u32 value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) #ifdef VERBOSE_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) if (dd->flags & SHA_FLAGS_DUMP_REG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) char tmp[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) dev_vdbg(dd->dev, "write 0x%08x into %s\n", value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) atmel_sha_reg_name(offset, tmp, sizeof(tmp), true));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) #endif /* VERBOSE_DEBUG */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) writel_relaxed(value, dd->io_base + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) static inline int atmel_sha_complete(struct atmel_sha_dev *dd, int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) struct ahash_request *req = dd->req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) SHA_FLAGS_DUMP_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) clk_disable(dd->iclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) if ((dd->is_async || dd->force_complete) && req->base.complete)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) req->base.complete(&req->base, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) /* handle new request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) tasklet_schedule(&dd->queue_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) size_t count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) while ((ctx->bufcnt < ctx->buflen) && ctx->total) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) count = min(ctx->sg->length - ctx->offset, ctx->total);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) count = min(count, ctx->buflen - ctx->bufcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) if (count <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) * Check if count <= 0 because the buffer is full or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) * because the sg length is 0. In the latest case,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) * check if there is another sg in the list, a 0 length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) * sg doesn't necessarily mean the end of the sg list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) ctx->sg = sg_next(ctx->sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) ctx->offset, count, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) ctx->bufcnt += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) ctx->offset += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) ctx->total -= count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) if (ctx->offset == ctx->sg->length) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) ctx->sg = sg_next(ctx->sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) if (ctx->sg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) ctx->offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) ctx->total = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) * The purpose of this padding is to ensure that the padded message is a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) * The bit "1" is appended at the end of the message followed by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) * "padlen-1" zero bits. Then a 64 bits block (SHA1/SHA224/SHA256) or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) * 128 bits block (SHA384/SHA512) equals to the message length in bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) * is appended.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) * For SHA1/SHA224/SHA256, padlen is calculated as followed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) * - if message length < 56 bytes then padlen = 56 - message length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) * - else padlen = 64 + 56 - message length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) * For SHA384/SHA512, padlen is calculated as followed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) * - if message length < 112 bytes then padlen = 112 - message length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) * - else padlen = 128 + 112 - message length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) static void atmel_sha_fill_padding(struct atmel_sha_reqctx *ctx, int length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) unsigned int index, padlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) __be64 bits[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) u64 size[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) size[0] = ctx->digcnt[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) size[1] = ctx->digcnt[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) size[0] += ctx->bufcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) if (size[0] < ctx->bufcnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) size[1]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) size[0] += length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) if (size[0] < length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) size[1]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) bits[1] = cpu_to_be64(size[0] << 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) bits[0] = cpu_to_be64(size[1] << 3 | size[0] >> 61);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) case SHA_FLAGS_SHA384:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) case SHA_FLAGS_SHA512:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) index = ctx->bufcnt & 0x7f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) padlen = (index < 112) ? (112 - index) : ((128+112) - index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) *(ctx->buffer + ctx->bufcnt) = 0x80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) ctx->bufcnt += padlen + 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) ctx->flags |= SHA_FLAGS_PAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) index = ctx->bufcnt & 0x3f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) padlen = (index < 56) ? (56 - index) : ((64+56) - index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) *(ctx->buffer + ctx->bufcnt) = 0x80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) ctx->bufcnt += padlen + 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) ctx->flags |= SHA_FLAGS_PAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) static struct atmel_sha_dev *atmel_sha_find_dev(struct atmel_sha_ctx *tctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) struct atmel_sha_dev *dd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) struct atmel_sha_dev *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) spin_lock_bh(&atmel_sha.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) if (!tctx->dd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) list_for_each_entry(tmp, &atmel_sha.dev_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) dd = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) tctx->dd = dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) dd = tctx->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) spin_unlock_bh(&atmel_sha.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) return dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) static int atmel_sha_init(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) struct atmel_sha_ctx *tctx = crypto_ahash_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) struct atmel_sha_dev *dd = atmel_sha_find_dev(tctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) ctx->dd = dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) ctx->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) dev_dbg(dd->dev, "init: digest size: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) crypto_ahash_digestsize(tfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) switch (crypto_ahash_digestsize(tfm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) case SHA1_DIGEST_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) ctx->flags |= SHA_FLAGS_SHA1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) ctx->block_size = SHA1_BLOCK_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) case SHA224_DIGEST_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) ctx->flags |= SHA_FLAGS_SHA224;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) ctx->block_size = SHA224_BLOCK_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) case SHA256_DIGEST_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) ctx->flags |= SHA_FLAGS_SHA256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) ctx->block_size = SHA256_BLOCK_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) case SHA384_DIGEST_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) ctx->flags |= SHA_FLAGS_SHA384;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) ctx->block_size = SHA384_BLOCK_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) case SHA512_DIGEST_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) ctx->flags |= SHA_FLAGS_SHA512;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) ctx->block_size = SHA512_BLOCK_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) ctx->bufcnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) ctx->digcnt[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) ctx->digcnt[1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) ctx->buflen = SHA_BUFFER_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472)
/*
 * Program the mode register (SHA_MR) for the current request and, when
 * needed, (re)initialize the hardware hash state.
 *
 * @dd:  SHA device; dd->req is the request being processed.
 * @dma: non-zero when input data will be fed by PDC/DMA, zero for PIO.
 *
 * The register write ordering below (IER, then CR/UIHV restore, then MR)
 * is relied upon; do not reorder.
 */
static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	u32 valmr = SHA_MR_MODE_AUTO;
	unsigned int i, hashsize = 0;

	if (likely(dma)) {
		/* Only the PDC path needs the TXBUFE interrupt; a real DMA
		 * controller signals completion through its own callback. */
		if (!dd->caps.has_dma)
			atmel_sha_write(dd, SHA_IER, SHA_INT_TXBUFE);
		valmr = SHA_MR_MODE_PDC;
		if (dd->caps.has_dualbuff)
			valmr |= SHA_MR_DUALBUFF;
	} else {
		/* PIO mode: completion is signalled by DATRDY. */
		atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
	}

	/*
	 * Select the algorithm and remember the size of the internal hash
	 * state (SHA-224/384 carry the full SHA-256/512 sized state).
	 */
	switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
	case SHA_FLAGS_SHA1:
		valmr |= SHA_MR_ALGO_SHA1;
		hashsize = SHA1_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA224:
		valmr |= SHA_MR_ALGO_SHA224;
		hashsize = SHA256_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA256:
		valmr |= SHA_MR_ALGO_SHA256;
		hashsize = SHA256_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA384:
		valmr |= SHA_MR_ALGO_SHA384;
		hashsize = SHA512_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA512:
		valmr |= SHA_MR_ALGO_SHA512;
		hashsize = SHA512_DIGEST_SIZE;
		break;

	default:
		break;
	}

	/* Setting CR_FIRST only for the first iteration */
	if (!(ctx->digcnt[0] || ctx->digcnt[1])) {
		atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
	} else if (dd->caps.has_uihv && (ctx->flags & SHA_FLAGS_RESTORE)) {
		const u32 *hash = (const u32 *)ctx->digest;

		/*
		 * Restore the hardware context: update the User Initialize
		 * Hash Value (UIHV) with the value saved when the latest
		 * 'update' operation completed on this very same crypto
		 * request.
		 */
		ctx->flags &= ~SHA_FLAGS_RESTORE;
		atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
		for (i = 0; i < hashsize / sizeof(u32); ++i)
			atmel_sha_write(dd, SHA_REG_DIN(i), hash[i]);
		atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
		valmr |= SHA_MR_UIHV;
	}
	/*
	 * WARNING: If the UIHV feature is not available, the hardware CANNOT
	 * process concurrent requests: the internal registers used to store
	 * the hash/digest are still set to the partial digest output values
	 * computed during the latest round.
	 */

	atmel_sha_write(dd, SHA_MR, valmr);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547)
/*
 * Resume immediately when the SHA core already asserts DATRDY, otherwise
 * arrange for @resume to be called from the interrupt handler.
 *
 * Returns the value of resume() if data was already ready, -EINPROGRESS
 * otherwise.
 */
static inline int atmel_sha_wait_for_data_ready(struct atmel_sha_dev *dd,
						atmel_sha_fn_t resume)
{
	u32 isr = atmel_sha_read(dd, SHA_ISR);

	if (unlikely(isr & SHA_INT_DATARDY))
		return resume(dd);

	/* Store the resume handler before unmasking the interrupt, so a
	 * racing IRQ always finds a valid callback. */
	dd->resume = resume;
	atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
	return -EINPROGRESS;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560)
/*
 * Feed @length bytes from @buf into the SHA core with programmed I/O.
 *
 * @final: non-zero when this is the last (already padded) block of the
 *         request; sets SHA_FLAGS_FINAL so the IRQ handler finalizes.
 *
 * Always returns -EINPROGRESS: completion is signalled by the DATRDY
 * interrupt unmasked by atmel_sha_write_ctrl(dd, 0).
 *
 * NOTE(review): the word loop rounds @length up, so @buf is presumably
 * always block-size aligned here (callers pass padded buffers) — an
 * unaligned length would read up to 3 bytes past the data.
 */
static int atmel_sha_xmit_cpu(struct atmel_sha_dev *dd, const u8 *buf,
			      size_t length, int final)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	int count, len32;
	const u32 *buffer = (const u32 *)buf;

	dev_dbg(dd->dev, "xmit_cpu: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n",
		ctx->digcnt[1], ctx->digcnt[0], length, final);

	atmel_sha_write_ctrl(dd, 0);

	/* should be non-zero before next lines to disable clocks later */
	ctx->digcnt[0] += length;
	if (ctx->digcnt[0] < length)	/* 64-bit carry into digcnt[1] */
		ctx->digcnt[1]++;

	if (final)
		dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	dd->flags |= SHA_FLAGS_CPU;

	for (count = 0; count < len32; count++)
		atmel_sha_write(dd, SHA_REG_DIN(count), buffer[count]);

	return -EINPROGRESS;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590)
/*
 * Queue up to two buffers on the Peripheral DMA Controller (PDC) and
 * start the transfer to the SHA core.
 *
 * @dma_addr1/@length1: first (mandatory) buffer.
 * @dma_addr2/@length2: optional chained buffer (next-pointer registers).
 * @final: non-zero when this transfer carries the last, padded block.
 *
 * Always returns -EINPROGRESS; the TXBUFE interrupt (unmasked by
 * atmel_sha_write_ctrl(dd, 1)) signals completion.
 */
static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
		size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	int len32;

	dev_dbg(dd->dev, "xmit_pdc: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n",
		ctx->digcnt[1], ctx->digcnt[0], length1, final);

	/* Disable the transmitter while (re)programming the pointers. */
	len32 = DIV_ROUND_UP(length1, sizeof(u32));
	atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTDIS);
	atmel_sha_write(dd, SHA_TPR, dma_addr1);
	atmel_sha_write(dd, SHA_TCR, len32);

	/* Chained "next" buffer; length2 == 0 makes it a no-op. */
	len32 = DIV_ROUND_UP(length2, sizeof(u32));
	atmel_sha_write(dd, SHA_TNPR, dma_addr2);
	atmel_sha_write(dd, SHA_TNCR, len32);

	atmel_sha_write_ctrl(dd, 1);

	/* should be non-zero before next lines to disable clocks later */
	ctx->digcnt[0] += length1;
	if (ctx->digcnt[0] < length1)	/* 64-bit carry into digcnt[1] */
		ctx->digcnt[1]++;

	if (final)
		dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */

	dd->flags |= SHA_FLAGS_DMA_ACTIVE;

	/* Start DMA transfer */
	atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTEN);

	return -EINPROGRESS;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626)
/*
 * dmaengine completion callback: the input DMA channel finished feeding
 * the core, so unmask DATRDY and let the IRQ handler finish the round.
 *
 * @data: the struct atmel_sha_dev set as callback_param in
 *        atmel_sha_xmit_dma().
 */
static void atmel_sha_dma_callback(void *data)
{
	struct atmel_sha_dev *dd = data;

	/* Runs in DMA callback context, not the caller's context. */
	dd->is_async = true;

	/* dma_lch_in - completed - wait DATRDY */
	atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636)
/*
 * Feed one or two pre-mapped buffers to the SHA core through the
 * dmaengine channel dd->dma_lch_in.
 *
 * @dma_addr1/@length1: first (mandatory) buffer.
 * @dma_addr2/@length2: optional second buffer (used when length2 != 0).
 * @final: non-zero when this transfer carries the last, padded block.
 *
 * Returns -EINPROGRESS on success (atmel_sha_dma_callback() resumes
 * processing), or the result of atmel_sha_complete(dd, -EINVAL) if the
 * descriptor could not be prepared.
 */
static int atmel_sha_xmit_dma(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
		size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	struct dma_async_tx_descriptor	*in_desc;
	struct scatterlist sg[2];

	dev_dbg(dd->dev, "xmit_dma: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n",
		ctx->digcnt[1], ctx->digcnt[0], length1, final);

	dd->dma_lch_in.dma_conf.src_maxburst = 16;
	dd->dma_lch_in.dma_conf.dst_maxburst = 16;

	dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);

	/* Build a one- or two-entry scatterlist around the DMA addresses;
	 * the pages are already mapped, only address/len fields are used. */
	if (length2) {
		sg_init_table(sg, 2);
		sg_dma_address(&sg[0]) = dma_addr1;
		sg_dma_len(&sg[0]) = length1;
		sg_dma_address(&sg[1]) = dma_addr2;
		sg_dma_len(&sg[1]) = length2;
		in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 2,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	} else {
		sg_init_table(sg, 1);
		sg_dma_address(&sg[0]) = dma_addr1;
		sg_dma_len(&sg[0]) = length1;
		in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 1,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}
	if (!in_desc)
		return atmel_sha_complete(dd, -EINVAL);

	in_desc->callback = atmel_sha_dma_callback;
	in_desc->callback_param = dd;

	atmel_sha_write_ctrl(dd, 1);

	/* should be non-zero before next lines to disable clocks later */
	ctx->digcnt[0] += length1;
	if (ctx->digcnt[0] < length1)	/* 64-bit carry into digcnt[1] */
		ctx->digcnt[1]++;

	if (final)
		dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */

	dd->flags |= SHA_FLAGS_DMA_ACTIVE;

	/* Start DMA transfer */
	dmaengine_submit(in_desc);
	dma_async_issue_pending(dd->dma_lch_in.chan);

	return -EINPROGRESS;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) static int atmel_sha_xmit_start(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) if (dd->caps.has_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) return atmel_sha_xmit_dma(dd, dma_addr1, length1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) dma_addr2, length2, final);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) return atmel_sha_xmit_pdc(dd, dma_addr1, length1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) dma_addr2, length2, final);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) static int atmel_sha_update_cpu(struct atmel_sha_dev *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) int bufcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) atmel_sha_append_sg(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) atmel_sha_fill_padding(ctx, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) bufcnt = ctx->bufcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) ctx->bufcnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) return atmel_sha_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715)
/*
 * Map the context bounce buffer for device access and send @length bytes
 * of it to the SHA core.
 *
 * Returns -EINPROGRESS on success or completes the request with -EINVAL
 * if the mapping failed. Clearing SHA_FLAGS_SG tells
 * atmel_sha_update_dma_stop() to unmap the single buffer, not an sg list.
 */
static int atmel_sha_xmit_dma_map(struct atmel_sha_dev *dd,
					struct atmel_sha_reqctx *ctx,
					size_t length, int final)
{
	/* Map buflen + block_size: the padding may spill past buflen. */
	ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
				ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
	if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
		dev_err(dd->dev, "dma %zu bytes error\n", ctx->buflen +
				ctx->block_size);
		return atmel_sha_complete(dd, -EINVAL);
	}

	ctx->flags &= ~SHA_FLAGS_SG;

	/* next call does not fail... so no unmap in the case of error */
	return atmel_sha_xmit_start(dd, ctx->dma_addr, length, 0, 0, final);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) static int atmel_sha_update_dma_slow(struct atmel_sha_dev *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) unsigned int final;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) size_t count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) atmel_sha_append_sg(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) dev_dbg(dd->dev, "slow: bufcnt: %zu, digcnt: 0x%llx 0x%llx, final: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) ctx->bufcnt, ctx->digcnt[1], ctx->digcnt[0], final);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) if (final)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) atmel_sha_fill_padding(ctx, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) if (final || (ctx->bufcnt == ctx->buflen)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) count = ctx->bufcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) ctx->bufcnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) return atmel_sha_xmit_dma_map(dd, ctx, count, final);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758)
/*
 * "Fast" DMA path: try to DMA straight out of the request scatterlist,
 * falling back to atmel_sha_update_dma_slow() (bounce buffer) whenever
 * alignment or buffered leftovers make direct DMA impossible.
 *
 * Returns 0 when there was nothing to do, -EINPROGRESS when a transfer
 * was started, or a negative error after completing the request.
 */
static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	unsigned int length, final, tail;
	struct scatterlist *sg;
	unsigned int count;

	if (!ctx->total)
		return 0;

	/* Leftover buffered bytes or a partially consumed sg entry force
	 * the bounce-buffer path to keep the data stream contiguous. */
	if (ctx->bufcnt || ctx->offset)
		return atmel_sha_update_dma_slow(dd);

	dev_dbg(dd->dev, "fast: digcnt: 0x%llx 0x%llx, bufcnt: %zd, total: %u\n",
		ctx->digcnt[1], ctx->digcnt[0], ctx->bufcnt, ctx->total);

	sg = ctx->sg;

	/* The DMA engine feeds 32-bit words: need word-aligned data. */
	if (!IS_ALIGNED(sg->offset, sizeof(u32)))
		return atmel_sha_update_dma_slow(dd);

	if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->block_size))
		/* size is not ctx->block_size aligned */
		return atmel_sha_update_dma_slow(dd);

	length = min(ctx->total, sg->length);

	if (sg_is_last(sg)) {
		if (!(ctx->flags & SHA_FLAGS_FINUP)) {
			/* not last sg must be ctx->block_size aligned */
			tail = length & (ctx->block_size - 1);
			length -= tail;
		}
	}

	ctx->total -= length;
	ctx->offset = length; /* offset where to start slow */

	final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;

	/* Add padding */
	if (final) {
		/* Split off the unaligned tail; it is copied (with the
		 * padding) into the bounce buffer by append_sg below. */
		tail = length & (ctx->block_size - 1);
		length -= tail;
		ctx->total += tail;
		ctx->offset = length; /* offset where to start slow */

		sg = ctx->sg;
		atmel_sha_append_sg(ctx);

		atmel_sha_fill_padding(ctx, length);

		ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
			ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
		if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
			dev_err(dd->dev, "dma %zu bytes error\n",
				ctx->buflen + ctx->block_size);
			return atmel_sha_complete(dd, -EINVAL);
		}

		if (length == 0) {
			/* Everything ended up in the bounce buffer: send it
			 * as a single mapped region. */
			ctx->flags &= ~SHA_FLAGS_SG;
			count = ctx->bufcnt;
			ctx->bufcnt = 0;
			return atmel_sha_xmit_start(dd, ctx->dma_addr, count, 0,
					0, final);
		} else {
			/* Send the aligned sg part plus the buffered tail +
			 * padding as a chained second buffer. */
			ctx->sg = sg;
			if (!dma_map_sg(dd->dev, ctx->sg, 1,
				DMA_TO_DEVICE)) {
					dev_err(dd->dev, "dma_map_sg error\n");
					return atmel_sha_complete(dd, -EINVAL);
			}

			ctx->flags |= SHA_FLAGS_SG;

			count = ctx->bufcnt;
			ctx->bufcnt = 0;
			return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg),
					length, ctx->dma_addr, count, final);
		}
	}

	/* Non-final chunk: DMA directly from the (aligned) sg entry. */
	if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
		dev_err(dd->dev, "dma_map_sg error\n");
		return atmel_sha_complete(dd, -EINVAL);
	}

	ctx->flags |= SHA_FLAGS_SG;

	/* next call does not fail... so no unmap in the case of error */
	return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), length, 0,
								0, final);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
/*
 * Undo the DMA mappings created by the last atmel_sha_update_dma_start()
 * call and advance the scatterlist cursor past fully consumed entries.
 *
 * SHA_FLAGS_SG distinguishes the two mapping layouts: sg-based transfer
 * (possibly with a chained padding buffer, flagged by SHA_FLAGS_PAD)
 * versus a single mapped bounce buffer.
 */
static void atmel_sha_update_dma_stop(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);

	if (ctx->flags & SHA_FLAGS_SG) {
		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
		if (ctx->sg->length == ctx->offset) {
			/* Entry fully consumed: move to the next one. */
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
		}
		if (ctx->flags & SHA_FLAGS_PAD) {
			/* The padding went out of the bounce buffer too. */
			dma_unmap_single(dd->dev, ctx->dma_addr,
				ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
		}
	} else {
		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen +
						ctx->block_size, DMA_TO_DEVICE);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) static int atmel_sha_update_req(struct atmel_sha_dev *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) struct ahash_request *req = dd->req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) dev_dbg(dd->dev, "update_req: total: %u, digcnt: 0x%llx 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) ctx->total, ctx->digcnt[1], ctx->digcnt[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) if (ctx->flags & SHA_FLAGS_CPU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) err = atmel_sha_update_cpu(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) err = atmel_sha_update_dma_start(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) /* wait for dma completion before can take more data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) dev_dbg(dd->dev, "update: err: %d, digcnt: 0x%llx 0%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) err, ctx->digcnt[1], ctx->digcnt[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) static int atmel_sha_final_req(struct atmel_sha_dev *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) struct ahash_request *req = dd->req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) if (ctx->bufcnt >= ATMEL_SHA_DMA_THRESHOLD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) atmel_sha_fill_padding(ctx, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) count = ctx->bufcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) ctx->bufcnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) err = atmel_sha_xmit_dma_map(dd, ctx, count, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) /* faster to handle last block with cpu */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) atmel_sha_fill_padding(ctx, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) count = ctx->bufcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) ctx->bufcnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) err = atmel_sha_xmit_cpu(dd, ctx->buffer, count, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) dev_dbg(dd->dev, "final_req: err: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) static void atmel_sha_copy_hash(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) u32 *hash = (u32 *)ctx->digest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) unsigned int i, hashsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) case SHA_FLAGS_SHA1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) hashsize = SHA1_DIGEST_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) case SHA_FLAGS_SHA224:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) case SHA_FLAGS_SHA256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) hashsize = SHA256_DIGEST_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) case SHA_FLAGS_SHA384:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) case SHA_FLAGS_SHA512:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) hashsize = SHA512_DIGEST_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) /* Should not happen... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) for (i = 0; i < hashsize / sizeof(u32); ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) ctx->flags |= SHA_FLAGS_RESTORE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) static void atmel_sha_copy_ready_hash(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) if (!req->result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) case SHA_FLAGS_SHA1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) case SHA_FLAGS_SHA224:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) memcpy(req->result, ctx->digest, SHA224_DIGEST_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) case SHA_FLAGS_SHA256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) case SHA_FLAGS_SHA384:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) memcpy(req->result, ctx->digest, SHA384_DIGEST_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) case SHA_FLAGS_SHA512:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) memcpy(req->result, ctx->digest, SHA512_DIGEST_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) static int atmel_sha_finish(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) struct atmel_sha_dev *dd = ctx->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) if (ctx->digcnt[0] || ctx->digcnt[1])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) atmel_sha_copy_ready_hash(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) dev_dbg(dd->dev, "digcnt: 0x%llx 0x%llx, bufcnt: %zd\n", ctx->digcnt[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) ctx->digcnt[0], ctx->bufcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) static void atmel_sha_finish_req(struct ahash_request *req, int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) struct atmel_sha_dev *dd = ctx->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) atmel_sha_copy_hash(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) if (SHA_FLAGS_FINAL & dd->flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) err = atmel_sha_finish(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) ctx->flags |= SHA_FLAGS_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) /* atomic operation is not needed here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) (void)atmel_sha_complete(dd, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) err = clk_enable(dd->iclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) if (!(SHA_FLAGS_INIT & dd->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) dd->flags |= SHA_FLAGS_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) static inline unsigned int atmel_sha_get_version(struct atmel_sha_dev *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) return atmel_sha_read(dd, SHA_HW_VERSION) & 0x00000fff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) static int atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) err = atmel_sha_hw_init(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) dd->hw_version = atmel_sha_get_version(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) dev_info(dd->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) "version: 0x%x\n", dd->hw_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) clk_disable(dd->iclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) struct crypto_async_request *async_req, *backlog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) struct atmel_sha_ctx *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) bool start_async;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) int err = 0, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) spin_lock_irqsave(&dd->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) if (req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) ret = ahash_enqueue_request(&dd->queue, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) if (SHA_FLAGS_BUSY & dd->flags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) spin_unlock_irqrestore(&dd->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) backlog = crypto_get_backlog(&dd->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) async_req = crypto_dequeue_request(&dd->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) if (async_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) dd->flags |= SHA_FLAGS_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) spin_unlock_irqrestore(&dd->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) if (!async_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) if (backlog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) backlog->complete(backlog, -EINPROGRESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) ctx = crypto_tfm_ctx(async_req->tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) dd->req = ahash_request_cast(async_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) start_async = (dd->req != req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) dd->is_async = start_async;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) dd->force_complete = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) /* WARNING: ctx->start() MAY change dd->is_async. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) err = ctx->start(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) return (start_async) ? ret : err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) static int atmel_sha_done(struct atmel_sha_dev *dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) static int atmel_sha_start(struct atmel_sha_dev *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) struct ahash_request *req = dd->req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) ctx->op, req->nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) err = atmel_sha_hw_init(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) return atmel_sha_complete(dd, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) * atmel_sha_update_req() and atmel_sha_final_req() can return either:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) * -EINPROGRESS: the hardware is busy and the SHA driver will resume
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) * its job later in the done_task.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) * This is the main path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) * 0: the SHA driver can continue its job then release the hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) * later, if needed, with atmel_sha_finish_req().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) * This is the alternate path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) * < 0: an error has occurred so atmel_sha_complete(dd, err) has already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) * been called, hence the hardware has been released.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) * The SHA driver must stop its job without calling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) * atmel_sha_finish_req(), otherwise atmel_sha_complete() would be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) * called a second time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) * Please note that currently, atmel_sha_final_req() never returns 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) dd->resume = atmel_sha_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) if (ctx->op == SHA_OP_UPDATE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) err = atmel_sha_update_req(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) if (!err && (ctx->flags & SHA_FLAGS_FINUP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) /* no final() after finup() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) err = atmel_sha_final_req(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) } else if (ctx->op == SHA_OP_FINAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) err = atmel_sha_final_req(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) /* done_task will not finish it, so do it here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) atmel_sha_finish_req(req, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) dev_dbg(dd->dev, "exit, err: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) static int atmel_sha_enqueue(struct ahash_request *req, unsigned int op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) struct atmel_sha_dev *dd = tctx->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) ctx->op = op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) return atmel_sha_handle_queue(dd, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) static int atmel_sha_update(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) if (!req->nbytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) ctx->total = req->nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) ctx->sg = req->src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) ctx->offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) if (ctx->flags & SHA_FLAGS_FINUP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) if (ctx->bufcnt + ctx->total < ATMEL_SHA_DMA_THRESHOLD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) /* faster to use CPU for short transfers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) ctx->flags |= SHA_FLAGS_CPU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) } else if (ctx->bufcnt + ctx->total < ctx->buflen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) atmel_sha_append_sg(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) return atmel_sha_enqueue(req, SHA_OP_UPDATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) static int atmel_sha_final(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) ctx->flags |= SHA_FLAGS_FINUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) if (ctx->flags & SHA_FLAGS_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) return 0; /* uncompleted hash is not needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) if (ctx->flags & SHA_FLAGS_PAD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) /* copy ready hash (+ finalize hmac) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) return atmel_sha_finish(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) return atmel_sha_enqueue(req, SHA_OP_FINAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) static int atmel_sha_finup(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) int err1, err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) ctx->flags |= SHA_FLAGS_FINUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) err1 = atmel_sha_update(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) if (err1 == -EINPROGRESS ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) (err1 == -EBUSY && (ahash_request_flags(req) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) CRYPTO_TFM_REQ_MAY_BACKLOG)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) return err1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) * final() has to be always called to cleanup resources
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) * even if udpate() failed, except EINPROGRESS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) err2 = atmel_sha_final(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) return err1 ?: err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) static int atmel_sha_digest(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) return atmel_sha_init(req) ?: atmel_sha_finup(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) static int atmel_sha_export(struct ahash_request *req, void *out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) const struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) memcpy(out, ctx, sizeof(*ctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) static int atmel_sha_import(struct ahash_request *req, const void *in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) memcpy(ctx, in, sizeof(*ctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) static int atmel_sha_cra_init(struct crypto_tfm *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) struct atmel_sha_ctx *ctx = crypto_tfm_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) sizeof(struct atmel_sha_reqctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) ctx->start = atmel_sha_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) static void atmel_sha_alg_init(struct ahash_alg *alg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) alg->halg.base.cra_priority = ATMEL_SHA_PRIORITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) alg->halg.base.cra_ctxsize = sizeof(struct atmel_sha_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) alg->halg.base.cra_module = THIS_MODULE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) alg->halg.base.cra_init = atmel_sha_cra_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) alg->halg.statesize = sizeof(struct atmel_sha_reqctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) alg->init = atmel_sha_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) alg->update = atmel_sha_update;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) alg->final = atmel_sha_final;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) alg->finup = atmel_sha_finup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) alg->digest = atmel_sha_digest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) alg->export = atmel_sha_export;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) alg->import = atmel_sha_import;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) static struct ahash_alg sha_1_256_algs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) .halg.base.cra_name = "sha1",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) .halg.base.cra_driver_name = "atmel-sha1",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) .halg.base.cra_blocksize = SHA1_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) .halg.digestsize = SHA1_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) .halg.base.cra_name = "sha256",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) .halg.base.cra_driver_name = "atmel-sha256",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) .halg.base.cra_blocksize = SHA256_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) .halg.digestsize = SHA256_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) static struct ahash_alg sha_224_alg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) .halg.base.cra_name = "sha224",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) .halg.base.cra_driver_name = "atmel-sha224",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) .halg.base.cra_blocksize = SHA224_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) .halg.digestsize = SHA224_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) static struct ahash_alg sha_384_512_algs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) .halg.base.cra_name = "sha384",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) .halg.base.cra_driver_name = "atmel-sha384",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) .halg.base.cra_blocksize = SHA384_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) .halg.base.cra_alignmask = 0x3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) .halg.digestsize = SHA384_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) .halg.base.cra_name = "sha512",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) .halg.base.cra_driver_name = "atmel-sha512",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) .halg.base.cra_blocksize = SHA512_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) .halg.base.cra_alignmask = 0x3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) .halg.digestsize = SHA512_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) static void atmel_sha_queue_task(unsigned long data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) atmel_sha_handle_queue(dd, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) static int atmel_sha_done(struct atmel_sha_dev *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) if (SHA_FLAGS_CPU & dd->flags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) dd->flags &= ~SHA_FLAGS_OUTPUT_READY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) goto finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) } else if (SHA_FLAGS_DMA_READY & dd->flags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) if (SHA_FLAGS_DMA_ACTIVE & dd->flags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) dd->flags &= ~SHA_FLAGS_DMA_ACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) atmel_sha_update_dma_stop(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) /* hash or semi-hash ready */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) dd->flags &= ~(SHA_FLAGS_DMA_READY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) SHA_FLAGS_OUTPUT_READY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) err = atmel_sha_update_dma_start(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) if (err != -EINPROGRESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) goto finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) finish:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) /* finish curent request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) atmel_sha_finish_req(dd->req, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) static void atmel_sha_done_task(unsigned long data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) dd->is_async = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) (void)dd->resume(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) static irqreturn_t atmel_sha_irq(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) struct atmel_sha_dev *sha_dd = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) reg = atmel_sha_read(sha_dd, SHA_ISR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) if (reg & atmel_sha_read(sha_dd, SHA_IMR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) atmel_sha_write(sha_dd, SHA_IDR, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) if (SHA_FLAGS_BUSY & sha_dd->flags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) sha_dd->flags |= SHA_FLAGS_OUTPUT_READY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) if (!(SHA_FLAGS_CPU & sha_dd->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) sha_dd->flags |= SHA_FLAGS_DMA_READY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) tasklet_schedule(&sha_dd->done_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) dev_warn(sha_dd->dev, "SHA interrupt when no active requests.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) /* DMA transfer functions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) static bool atmel_sha_dma_check_aligned(struct atmel_sha_dev *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) struct scatterlist *sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) struct atmel_sha_dma *dma = &dd->dma_lch_in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) struct ahash_request *req = dd->req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) size_t bs = ctx->block_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) int nents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) for (nents = 0; sg; sg = sg_next(sg), ++nents) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) if (!IS_ALIGNED(sg->offset, sizeof(u32)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) * This is the last sg, the only one that is allowed to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) * have an unaligned length.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) if (len <= sg->length) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) dma->nents = nents + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) dma->last_sg_length = sg->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) sg->length = ALIGN(len, sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) /* All other sg lengths MUST be aligned to the block size. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) if (!IS_ALIGNED(sg->length, bs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) len -= sg->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) static void atmel_sha_dma_callback2(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) struct atmel_sha_dev *dd = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) struct atmel_sha_dma *dma = &dd->dma_lch_in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) int nents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) dma_unmap_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) sg = dma->sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) for (nents = 0; nents < dma->nents - 1; ++nents)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) sg = sg_next(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) sg->length = dma->last_sg_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) dd->is_async = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) (void)atmel_sha_wait_for_data_ready(dd, dd->resume);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) static int atmel_sha_dma_start(struct atmel_sha_dev *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) struct scatterlist *src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) size_t len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) atmel_sha_fn_t resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) struct atmel_sha_dma *dma = &dd->dma_lch_in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) struct dma_slave_config *config = &dma->dma_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) struct dma_chan *chan = dma->chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) struct dma_async_tx_descriptor *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) dma_cookie_t cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) unsigned int sg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) dd->resume = resume;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) * dma->nents has already been initialized by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) * atmel_sha_dma_check_aligned().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) dma->sg = src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) sg_len = dma_map_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) if (!sg_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) config->src_maxburst = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) config->dst_maxburst = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) err = dmaengine_slave_config(chan, config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) goto unmap_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) desc = dmaengine_prep_slave_sg(chan, dma->sg, sg_len, DMA_MEM_TO_DEV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) if (!desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) goto unmap_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) desc->callback = atmel_sha_dma_callback2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) desc->callback_param = dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) cookie = dmaengine_submit(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) err = dma_submit_error(cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) goto unmap_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) dma_async_issue_pending(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) return -EINPROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) unmap_sg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) dma_unmap_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) return atmel_sha_complete(dd, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) /* CPU transfer functions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) static int atmel_sha_cpu_transfer(struct atmel_sha_dev *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) struct ahash_request *req = dd->req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) const u32 *words = (const u32 *)ctx->buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) size_t i, num_words;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) u32 isr, din, din_inc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) din_inc = (ctx->flags & SHA_FLAGS_IDATAR0) ? 0 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) /* Write data into the Input Data Registers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) num_words = DIV_ROUND_UP(ctx->bufcnt, sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) for (i = 0, din = 0; i < num_words; ++i, din += din_inc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) atmel_sha_write(dd, SHA_REG_DIN(din), words[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) ctx->offset += ctx->bufcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) ctx->total -= ctx->bufcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) if (!ctx->total)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) * Prepare next block:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) * Fill ctx->buffer now with the next data to be written into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) * IDATARx: it gives time for the SHA hardware to process
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) * the current data so the SHA_INT_DATARDY flag might be set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) * in SHA_ISR when polling this register at the beginning of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) * the next loop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) ctx->bufcnt = min_t(size_t, ctx->block_size, ctx->total);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) scatterwalk_map_and_copy(ctx->buffer, ctx->sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) ctx->offset, ctx->bufcnt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) /* Wait for hardware to be ready again. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) isr = atmel_sha_read(dd, SHA_ISR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) if (!(isr & SHA_INT_DATARDY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) /* Not ready yet. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) dd->resume = atmel_sha_cpu_transfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) return -EINPROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) if (unlikely(!(ctx->flags & SHA_FLAGS_WAIT_DATARDY)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) return dd->cpu_transfer_complete(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) return atmel_sha_wait_for_data_ready(dd, dd->cpu_transfer_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) static int atmel_sha_cpu_start(struct atmel_sha_dev *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) struct scatterlist *sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) unsigned int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) bool idatar0_only,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) bool wait_data_ready,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) atmel_sha_fn_t resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) struct ahash_request *req = dd->req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) if (!len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) return resume(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) ctx->flags &= ~(SHA_FLAGS_IDATAR0 | SHA_FLAGS_WAIT_DATARDY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) if (idatar0_only)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) ctx->flags |= SHA_FLAGS_IDATAR0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) if (wait_data_ready)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) ctx->flags |= SHA_FLAGS_WAIT_DATARDY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) ctx->sg = sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) ctx->total = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) ctx->offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) /* Prepare the first block to be written. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) ctx->bufcnt = min_t(size_t, ctx->block_size, ctx->total);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) scatterwalk_map_and_copy(ctx->buffer, ctx->sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) ctx->offset, ctx->bufcnt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) dd->cpu_transfer_complete = resume;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) return atmel_sha_cpu_transfer(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) static int atmel_sha_cpu_hash(struct atmel_sha_dev *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) const void *data, unsigned int datalen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) bool auto_padding,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) atmel_sha_fn_t resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) struct ahash_request *req = dd->req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) u32 msglen = (auto_padding) ? datalen : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) u32 mr = SHA_MR_MODE_AUTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) if (!(IS_ALIGNED(datalen, ctx->block_size) || auto_padding))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) return atmel_sha_complete(dd, -EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) mr |= (ctx->flags & SHA_FLAGS_ALGO_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) atmel_sha_write(dd, SHA_MR, mr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) atmel_sha_write(dd, SHA_MSR, msglen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) atmel_sha_write(dd, SHA_BCR, msglen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) sg_init_one(&dd->tmp, data, datalen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) return atmel_sha_cpu_start(dd, &dd->tmp, datalen, false, true, resume);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) /* hmac functions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) struct atmel_sha_hmac_key {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) bool valid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) unsigned int keylen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) u8 buffer[SHA512_BLOCK_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) u8 *keydup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) static inline void atmel_sha_hmac_key_init(struct atmel_sha_hmac_key *hkey)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) memset(hkey, 0, sizeof(*hkey));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) static inline void atmel_sha_hmac_key_release(struct atmel_sha_hmac_key *hkey)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) kfree(hkey->keydup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) memset(hkey, 0, sizeof(*hkey));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) static inline int atmel_sha_hmac_key_set(struct atmel_sha_hmac_key *hkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) const u8 *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) unsigned int keylen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) atmel_sha_hmac_key_release(hkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) if (keylen > sizeof(hkey->buffer)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) hkey->keydup = kmemdup(key, keylen, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) if (!hkey->keydup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) memcpy(hkey->buffer, key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) hkey->valid = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) hkey->keylen = keylen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) static inline bool atmel_sha_hmac_key_get(const struct atmel_sha_hmac_key *hkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) const u8 **key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) unsigned int *keylen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) if (!hkey->valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) *keylen = hkey->keylen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) *key = (hkey->keydup) ? hkey->keydup : hkey->buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) struct atmel_sha_hmac_ctx {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) struct atmel_sha_ctx base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) struct atmel_sha_hmac_key hkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) u32 ipad[SHA512_BLOCK_SIZE / sizeof(u32)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) u32 opad[SHA512_BLOCK_SIZE / sizeof(u32)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) atmel_sha_fn_t resume;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) static int atmel_sha_hmac_setup(struct atmel_sha_dev *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) atmel_sha_fn_t resume);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) static int atmel_sha_hmac_prehash_key(struct atmel_sha_dev *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) const u8 *key, unsigned int keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) static int atmel_sha_hmac_prehash_key_done(struct atmel_sha_dev *dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) static int atmel_sha_hmac_compute_ipad_hash(struct atmel_sha_dev *dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) static int atmel_sha_hmac_compute_opad_hash(struct atmel_sha_dev *dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) static int atmel_sha_hmac_setup_done(struct atmel_sha_dev *dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) static int atmel_sha_hmac_init_done(struct atmel_sha_dev *dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) static int atmel_sha_hmac_final(struct atmel_sha_dev *dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) static int atmel_sha_hmac_final_done(struct atmel_sha_dev *dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) static int atmel_sha_hmac_digest2(struct atmel_sha_dev *dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) static int atmel_sha_hmac_setup(struct atmel_sha_dev *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) atmel_sha_fn_t resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) struct ahash_request *req = dd->req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) unsigned int keylen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) const u8 *key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) size_t bs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) hmac->resume = resume;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) case SHA_FLAGS_SHA1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) ctx->block_size = SHA1_BLOCK_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) ctx->hash_size = SHA1_DIGEST_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) case SHA_FLAGS_SHA224:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) ctx->block_size = SHA224_BLOCK_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) ctx->hash_size = SHA256_DIGEST_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) case SHA_FLAGS_SHA256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) ctx->block_size = SHA256_BLOCK_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) ctx->hash_size = SHA256_DIGEST_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) case SHA_FLAGS_SHA384:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) ctx->block_size = SHA384_BLOCK_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) ctx->hash_size = SHA512_DIGEST_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) case SHA_FLAGS_SHA512:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) ctx->block_size = SHA512_BLOCK_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) ctx->hash_size = SHA512_DIGEST_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) return atmel_sha_complete(dd, -EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) bs = ctx->block_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) if (likely(!atmel_sha_hmac_key_get(&hmac->hkey, &key, &keylen)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) return resume(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) /* Compute K' from K. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) if (unlikely(keylen > bs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) return atmel_sha_hmac_prehash_key(dd, key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) /* Prepare ipad. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) memcpy((u8 *)hmac->ipad, key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) memset((u8 *)hmac->ipad + keylen, 0, bs - keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) return atmel_sha_hmac_compute_ipad_hash(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) static int atmel_sha_hmac_prehash_key(struct atmel_sha_dev *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) const u8 *key, unsigned int keylen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) return atmel_sha_cpu_hash(dd, key, keylen, true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) atmel_sha_hmac_prehash_key_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) static int atmel_sha_hmac_prehash_key_done(struct atmel_sha_dev *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) struct ahash_request *req = dd->req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) size_t ds = crypto_ahash_digestsize(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) size_t bs = ctx->block_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) size_t i, num_words = ds / sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) /* Prepare ipad. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) for (i = 0; i < num_words; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) hmac->ipad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) memset((u8 *)hmac->ipad + ds, 0, bs - ds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) return atmel_sha_hmac_compute_ipad_hash(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) static int atmel_sha_hmac_compute_ipad_hash(struct atmel_sha_dev *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) struct ahash_request *req = dd->req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) size_t bs = ctx->block_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) size_t i, num_words = bs / sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) memcpy(hmac->opad, hmac->ipad, bs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) for (i = 0; i < num_words; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) hmac->ipad[i] ^= 0x36363636;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) hmac->opad[i] ^= 0x5c5c5c5c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) return atmel_sha_cpu_hash(dd, hmac->ipad, bs, false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) atmel_sha_hmac_compute_opad_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) static int atmel_sha_hmac_compute_opad_hash(struct atmel_sha_dev *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) struct ahash_request *req = dd->req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) size_t bs = ctx->block_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) size_t hs = ctx->hash_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) size_t i, num_words = hs / sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) for (i = 0; i < num_words; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) hmac->ipad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) return atmel_sha_cpu_hash(dd, hmac->opad, bs, false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) atmel_sha_hmac_setup_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) static int atmel_sha_hmac_setup_done(struct atmel_sha_dev *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) struct ahash_request *req = dd->req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) size_t hs = ctx->hash_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) size_t i, num_words = hs / sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) for (i = 0; i < num_words; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) hmac->opad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) atmel_sha_hmac_key_release(&hmac->hkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) return hmac->resume(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) static int atmel_sha_hmac_start(struct atmel_sha_dev *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) struct ahash_request *req = dd->req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) err = atmel_sha_hw_init(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) return atmel_sha_complete(dd, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) switch (ctx->op) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) case SHA_OP_INIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) err = atmel_sha_hmac_setup(dd, atmel_sha_hmac_init_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) case SHA_OP_UPDATE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) dd->resume = atmel_sha_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) err = atmel_sha_update_req(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) case SHA_OP_FINAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) dd->resume = atmel_sha_hmac_final;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) err = atmel_sha_final_req(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) case SHA_OP_DIGEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) err = atmel_sha_hmac_setup(dd, atmel_sha_hmac_digest2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) return atmel_sha_complete(dd, -EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) static int atmel_sha_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) unsigned int keylen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) return atmel_sha_hmac_key_set(&hmac->hkey, key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) static int atmel_sha_hmac_init(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) err = atmel_sha_init(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) return atmel_sha_enqueue(req, SHA_OP_INIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) static int atmel_sha_hmac_init_done(struct atmel_sha_dev *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) struct ahash_request *req = dd->req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) size_t bs = ctx->block_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) size_t hs = ctx->hash_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) ctx->bufcnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) ctx->digcnt[0] = bs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) ctx->digcnt[1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) ctx->flags |= SHA_FLAGS_RESTORE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) memcpy(ctx->digest, hmac->ipad, hs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) return atmel_sha_complete(dd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) static int atmel_sha_hmac_final(struct atmel_sha_dev *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) struct ahash_request *req = dd->req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) u32 *digest = (u32 *)ctx->digest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) size_t ds = crypto_ahash_digestsize(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) size_t bs = ctx->block_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) size_t hs = ctx->hash_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) size_t i, num_words;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) u32 mr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) /* Save d = SHA((K' + ipad) | msg). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) num_words = ds / sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) for (i = 0; i < num_words; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) digest[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) /* Restore context to finish computing SHA((K' + opad) | d). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) num_words = hs / sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) for (i = 0; i < num_words; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) mr = SHA_MR_MODE_AUTO | SHA_MR_UIHV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) mr |= (ctx->flags & SHA_FLAGS_ALGO_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) atmel_sha_write(dd, SHA_MR, mr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) atmel_sha_write(dd, SHA_MSR, bs + ds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) atmel_sha_write(dd, SHA_BCR, ds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) sg_init_one(&dd->tmp, digest, ds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) return atmel_sha_cpu_start(dd, &dd->tmp, ds, false, true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) atmel_sha_hmac_final_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) static int atmel_sha_hmac_final_done(struct atmel_sha_dev *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) * req->result might not be sizeof(u32) aligned, so copy the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) * digest into ctx->digest[] before memcpy() the data into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) * req->result.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) atmel_sha_copy_hash(dd->req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) atmel_sha_copy_ready_hash(dd->req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) return atmel_sha_complete(dd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) static int atmel_sha_hmac_digest(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) err = atmel_sha_init(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) return atmel_sha_enqueue(req, SHA_OP_DIGEST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) static int atmel_sha_hmac_digest2(struct atmel_sha_dev *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) struct ahash_request *req = dd->req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) size_t hs = ctx->hash_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) size_t i, num_words = hs / sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) bool use_dma = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) u32 mr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) /* Special case for empty message. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) if (!req->nbytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) return atmel_sha_complete(dd, -EINVAL); // TODO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) /* Check DMA threshold and alignment. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) if (req->nbytes > ATMEL_SHA_DMA_THRESHOLD &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) atmel_sha_dma_check_aligned(dd, req->src, req->nbytes))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) use_dma = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) /* Write both initial hash values to compute a HMAC. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) for (i = 0; i < num_words; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) atmel_sha_write(dd, SHA_REG_DIN(i), hmac->ipad[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) atmel_sha_write(dd, SHA_CR, SHA_CR_WUIEHV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) for (i = 0; i < num_words; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) /* Write the Mode, Message Size, Bytes Count then Control Registers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) mr = (SHA_MR_HMAC | SHA_MR_DUALBUFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) mr |= ctx->flags & SHA_FLAGS_ALGO_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) if (use_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) mr |= SHA_MR_MODE_IDATAR0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) mr |= SHA_MR_MODE_AUTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) atmel_sha_write(dd, SHA_MR, mr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) atmel_sha_write(dd, SHA_MSR, req->nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) atmel_sha_write(dd, SHA_BCR, req->nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) /* Process data. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) if (use_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) return atmel_sha_dma_start(dd, req->src, req->nbytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) atmel_sha_hmac_final_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) return atmel_sha_cpu_start(dd, req->src, req->nbytes, false, true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) atmel_sha_hmac_final_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) static int atmel_sha_hmac_cra_init(struct crypto_tfm *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) struct atmel_sha_hmac_ctx *hmac = crypto_tfm_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) sizeof(struct atmel_sha_reqctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) hmac->base.start = atmel_sha_hmac_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) atmel_sha_hmac_key_init(&hmac->hkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) static void atmel_sha_hmac_cra_exit(struct crypto_tfm *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) struct atmel_sha_hmac_ctx *hmac = crypto_tfm_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) atmel_sha_hmac_key_release(&hmac->hkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) static void atmel_sha_hmac_alg_init(struct ahash_alg *alg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) alg->halg.base.cra_priority = ATMEL_SHA_PRIORITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) alg->halg.base.cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) alg->halg.base.cra_module = THIS_MODULE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) alg->halg.base.cra_init = atmel_sha_hmac_cra_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) alg->halg.base.cra_exit = atmel_sha_hmac_cra_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) alg->halg.statesize = sizeof(struct atmel_sha_reqctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) alg->init = atmel_sha_hmac_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) alg->update = atmel_sha_update;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) alg->final = atmel_sha_final;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) alg->digest = atmel_sha_hmac_digest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) alg->setkey = atmel_sha_hmac_setkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) alg->export = atmel_sha_export;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) alg->import = atmel_sha_import;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) static struct ahash_alg sha_hmac_algs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) .halg.base.cra_name = "hmac(sha1)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) .halg.base.cra_driver_name = "atmel-hmac-sha1",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) .halg.base.cra_blocksize = SHA1_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) .halg.digestsize = SHA1_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) .halg.base.cra_name = "hmac(sha224)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) .halg.base.cra_driver_name = "atmel-hmac-sha224",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) .halg.base.cra_blocksize = SHA224_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) .halg.digestsize = SHA224_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) .halg.base.cra_name = "hmac(sha256)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) .halg.base.cra_driver_name = "atmel-hmac-sha256",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) .halg.base.cra_blocksize = SHA256_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) .halg.digestsize = SHA256_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) .halg.base.cra_name = "hmac(sha384)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) .halg.base.cra_driver_name = "atmel-hmac-sha384",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) .halg.base.cra_blocksize = SHA384_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) .halg.digestsize = SHA384_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) .halg.base.cra_name = "hmac(sha512)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) .halg.base.cra_driver_name = "atmel-hmac-sha512",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) .halg.base.cra_blocksize = SHA512_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) .halg.digestsize = SHA512_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) #if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) /* authenc functions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) static int atmel_sha_authenc_init2(struct atmel_sha_dev *dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) static int atmel_sha_authenc_init_done(struct atmel_sha_dev *dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) static int atmel_sha_authenc_final_done(struct atmel_sha_dev *dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) struct atmel_sha_authenc_ctx {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) struct crypto_ahash *tfm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) struct atmel_sha_authenc_reqctx {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) struct atmel_sha_reqctx base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) atmel_aes_authenc_fn_t cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) struct atmel_aes_dev *aes_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) /* _init() parameters. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) struct scatterlist *assoc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) u32 assoclen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) u32 textlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) /* _final() parameters. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) u32 *digest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) unsigned int digestlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) static void atmel_sha_authenc_complete(struct crypto_async_request *areq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) struct ahash_request *req = areq->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) authctx->cb(authctx->aes_dev, err, authctx->base.dd->is_async);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) static int atmel_sha_authenc_start(struct atmel_sha_dev *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) struct ahash_request *req = dd->req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) * Force atmel_sha_complete() to call req->base.complete(), ie
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) * atmel_sha_authenc_complete(), which in turn calls authctx->cb().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) dd->force_complete = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) err = atmel_sha_hw_init(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) return authctx->cb(authctx->aes_dev, err, dd->is_async);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) bool atmel_sha_authenc_is_ready(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) struct atmel_sha_ctx dummy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) dummy.dd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) return (atmel_sha_find_dev(&dummy) != NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) EXPORT_SYMBOL_GPL(atmel_sha_authenc_is_ready);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) unsigned int atmel_sha_authenc_get_reqsize(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) return sizeof(struct atmel_sha_authenc_reqctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) EXPORT_SYMBOL_GPL(atmel_sha_authenc_get_reqsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) struct atmel_sha_authenc_ctx *atmel_sha_authenc_spawn(unsigned long mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) struct atmel_sha_authenc_ctx *auth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) struct crypto_ahash *tfm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) struct atmel_sha_ctx *tctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) int err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) switch (mode & SHA_FLAGS_MODE_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) case SHA_FLAGS_HMAC_SHA1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) name = "atmel-hmac-sha1";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) case SHA_FLAGS_HMAC_SHA224:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) name = "atmel-hmac-sha224";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) case SHA_FLAGS_HMAC_SHA256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) name = "atmel-hmac-sha256";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) case SHA_FLAGS_HMAC_SHA384:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) name = "atmel-hmac-sha384";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) case SHA_FLAGS_HMAC_SHA512:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) name = "atmel-hmac-sha512";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) tfm = crypto_alloc_ahash(name, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) if (IS_ERR(tfm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) err = PTR_ERR(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) tctx = crypto_ahash_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) tctx->start = atmel_sha_authenc_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) tctx->flags = mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) auth = kzalloc(sizeof(*auth), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) if (!auth) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) goto err_free_ahash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) auth->tfm = tfm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) return auth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) err_free_ahash:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) crypto_free_ahash(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) EXPORT_SYMBOL_GPL(atmel_sha_authenc_spawn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) void atmel_sha_authenc_free(struct atmel_sha_authenc_ctx *auth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) if (auth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) crypto_free_ahash(auth->tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) kfree(auth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) EXPORT_SYMBOL_GPL(atmel_sha_authenc_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) int atmel_sha_authenc_setkey(struct atmel_sha_authenc_ctx *auth,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) const u8 *key, unsigned int keylen, u32 flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) struct crypto_ahash *tfm = auth->tfm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) crypto_ahash_clear_flags(tfm, CRYPTO_TFM_REQ_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) crypto_ahash_set_flags(tfm, flags & CRYPTO_TFM_REQ_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) return crypto_ahash_setkey(tfm, key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) EXPORT_SYMBOL_GPL(atmel_sha_authenc_setkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) int atmel_sha_authenc_schedule(struct ahash_request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) struct atmel_sha_authenc_ctx *auth,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) atmel_aes_authenc_fn_t cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) struct atmel_aes_dev *aes_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) struct atmel_sha_reqctx *ctx = &authctx->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) struct crypto_ahash *tfm = auth->tfm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) struct atmel_sha_ctx *tctx = crypto_ahash_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) struct atmel_sha_dev *dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) /* Reset request context (MUST be done first). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) memset(authctx, 0, sizeof(*authctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) /* Get SHA device. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) dd = atmel_sha_find_dev(tctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) if (!dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) return cb(aes_dev, -ENODEV, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) /* Init request context. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) ctx->dd = dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) ctx->buflen = SHA_BUFFER_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) authctx->cb = cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) authctx->aes_dev = aes_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) ahash_request_set_tfm(req, tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) ahash_request_set_callback(req, 0, atmel_sha_authenc_complete, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) return atmel_sha_handle_queue(dd, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) EXPORT_SYMBOL_GPL(atmel_sha_authenc_schedule);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) int atmel_sha_authenc_init(struct ahash_request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) struct scatterlist *assoc, unsigned int assoclen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) unsigned int textlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) atmel_aes_authenc_fn_t cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) struct atmel_aes_dev *aes_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) struct atmel_sha_reqctx *ctx = &authctx->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) struct atmel_sha_dev *dd = ctx->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) if (unlikely(!IS_ALIGNED(assoclen, sizeof(u32))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) return atmel_sha_complete(dd, -EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) authctx->cb = cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) authctx->aes_dev = aes_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) authctx->assoc = assoc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) authctx->assoclen = assoclen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) authctx->textlen = textlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) ctx->flags = hmac->base.flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) return atmel_sha_hmac_setup(dd, atmel_sha_authenc_init2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) EXPORT_SYMBOL_GPL(atmel_sha_authenc_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) static int atmel_sha_authenc_init2(struct atmel_sha_dev *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) struct ahash_request *req = dd->req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) struct atmel_sha_reqctx *ctx = &authctx->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) size_t hs = ctx->hash_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) size_t i, num_words = hs / sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) u32 mr, msg_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) for (i = 0; i < num_words; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) atmel_sha_write(dd, SHA_REG_DIN(i), hmac->ipad[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) atmel_sha_write(dd, SHA_CR, SHA_CR_WUIEHV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) for (i = 0; i < num_words; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) mr = (SHA_MR_MODE_IDATAR0 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) SHA_MR_HMAC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) SHA_MR_DUALBUFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) mr |= ctx->flags & SHA_FLAGS_ALGO_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) atmel_sha_write(dd, SHA_MR, mr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) msg_size = authctx->assoclen + authctx->textlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) atmel_sha_write(dd, SHA_MSR, msg_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) atmel_sha_write(dd, SHA_BCR, msg_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) /* Process assoc data. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) return atmel_sha_cpu_start(dd, authctx->assoc, authctx->assoclen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) true, false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) atmel_sha_authenc_init_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) static int atmel_sha_authenc_init_done(struct atmel_sha_dev *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) struct ahash_request *req = dd->req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) return authctx->cb(authctx->aes_dev, 0, dd->is_async);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) int atmel_sha_authenc_final(struct ahash_request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) u32 *digest, unsigned int digestlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) atmel_aes_authenc_fn_t cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) struct atmel_aes_dev *aes_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) struct atmel_sha_reqctx *ctx = &authctx->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) struct atmel_sha_dev *dd = ctx->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) case SHA_FLAGS_SHA1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) authctx->digestlen = SHA1_DIGEST_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) case SHA_FLAGS_SHA224:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) authctx->digestlen = SHA224_DIGEST_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) case SHA_FLAGS_SHA256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) authctx->digestlen = SHA256_DIGEST_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) case SHA_FLAGS_SHA384:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) authctx->digestlen = SHA384_DIGEST_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) case SHA_FLAGS_SHA512:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) authctx->digestlen = SHA512_DIGEST_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) return atmel_sha_complete(dd, -EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) if (authctx->digestlen > digestlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) authctx->digestlen = digestlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) authctx->cb = cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) authctx->aes_dev = aes_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) authctx->digest = digest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) return atmel_sha_wait_for_data_ready(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) atmel_sha_authenc_final_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) EXPORT_SYMBOL_GPL(atmel_sha_authenc_final);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) static int atmel_sha_authenc_final_done(struct atmel_sha_dev *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) struct ahash_request *req = dd->req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) size_t i, num_words = authctx->digestlen / sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) for (i = 0; i < num_words; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) authctx->digest[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) return atmel_sha_complete(dd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) void atmel_sha_authenc_abort(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) struct atmel_sha_reqctx *ctx = &authctx->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) struct atmel_sha_dev *dd = ctx->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) /* Prevent atmel_sha_complete() from calling req->base.complete(). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) dd->is_async = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) dd->force_complete = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) (void)atmel_sha_complete(dd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) EXPORT_SYMBOL_GPL(atmel_sha_authenc_abort);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) #endif /* CONFIG_CRYPTO_DEV_ATMEL_AUTHENC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) static void atmel_sha_unregister_algs(struct atmel_sha_dev *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) if (dd->caps.has_hmac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) for (i = 0; i < ARRAY_SIZE(sha_hmac_algs); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) crypto_unregister_ahash(&sha_hmac_algs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) crypto_unregister_ahash(&sha_1_256_algs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) if (dd->caps.has_sha224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) crypto_unregister_ahash(&sha_224_alg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) if (dd->caps.has_sha_384_512) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) crypto_unregister_ahash(&sha_384_512_algs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) int err, i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) atmel_sha_alg_init(&sha_1_256_algs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) err = crypto_register_ahash(&sha_1_256_algs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) goto err_sha_1_256_algs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) if (dd->caps.has_sha224) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) atmel_sha_alg_init(&sha_224_alg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) err = crypto_register_ahash(&sha_224_alg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) goto err_sha_224_algs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) if (dd->caps.has_sha_384_512) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) atmel_sha_alg_init(&sha_384_512_algs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) err = crypto_register_ahash(&sha_384_512_algs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) goto err_sha_384_512_algs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) if (dd->caps.has_hmac) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) for (i = 0; i < ARRAY_SIZE(sha_hmac_algs); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) atmel_sha_hmac_alg_init(&sha_hmac_algs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) err = crypto_register_ahash(&sha_hmac_algs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) goto err_sha_hmac_algs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) /*i = ARRAY_SIZE(sha_hmac_algs);*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) err_sha_hmac_algs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) for (j = 0; j < i; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) crypto_unregister_ahash(&sha_hmac_algs[j]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) i = ARRAY_SIZE(sha_384_512_algs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) err_sha_384_512_algs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) for (j = 0; j < i; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) crypto_unregister_ahash(&sha_384_512_algs[j]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) crypto_unregister_ahash(&sha_224_alg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) err_sha_224_algs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) i = ARRAY_SIZE(sha_1_256_algs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) err_sha_1_256_algs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) for (j = 0; j < i; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) crypto_unregister_ahash(&sha_1_256_algs[j]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) static int atmel_sha_dma_init(struct atmel_sha_dev *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) dd->dma_lch_in.chan = dma_request_chan(dd->dev, "tx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) if (IS_ERR(dd->dma_lch_in.chan)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) dev_err(dd->dev, "DMA channel is not available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) return PTR_ERR(dd->dma_lch_in.chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) SHA_REG_DIN(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) dd->dma_lch_in.dma_conf.src_maxburst = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) dd->dma_lch_in.dma_conf.src_addr_width =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) DMA_SLAVE_BUSWIDTH_4_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) dd->dma_lch_in.dma_conf.dst_maxburst = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) dd->dma_lch_in.dma_conf.dst_addr_width =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) DMA_SLAVE_BUSWIDTH_4_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) dd->dma_lch_in.dma_conf.device_fc = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) static void atmel_sha_dma_cleanup(struct atmel_sha_dev *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) dma_release_channel(dd->dma_lch_in.chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) static void atmel_sha_get_cap(struct atmel_sha_dev *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) dd->caps.has_dma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) dd->caps.has_dualbuff = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) dd->caps.has_sha224 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) dd->caps.has_sha_384_512 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) dd->caps.has_uihv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) dd->caps.has_hmac = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) /* keep only major version number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) switch (dd->hw_version & 0xff0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) case 0x510:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) dd->caps.has_dma = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) dd->caps.has_dualbuff = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) dd->caps.has_sha224 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) dd->caps.has_sha_384_512 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) dd->caps.has_uihv = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) dd->caps.has_hmac = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) case 0x420:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) dd->caps.has_dma = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) dd->caps.has_dualbuff = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) dd->caps.has_sha224 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) dd->caps.has_sha_384_512 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) dd->caps.has_uihv = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) case 0x410:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) dd->caps.has_dma = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) dd->caps.has_dualbuff = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) dd->caps.has_sha224 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) dd->caps.has_sha_384_512 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) case 0x400:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) dd->caps.has_dma = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) dd->caps.has_dualbuff = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) dd->caps.has_sha224 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) case 0x320:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) dev_warn(dd->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) "Unmanaged sha version, set minimum capabilities\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) #if defined(CONFIG_OF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) static const struct of_device_id atmel_sha_dt_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) { .compatible = "atmel,at91sam9g46-sha" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) { /* sentinel */ }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) MODULE_DEVICE_TABLE(of, atmel_sha_dt_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) static int atmel_sha_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) struct atmel_sha_dev *sha_dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) struct resource *sha_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) sha_dd = devm_kzalloc(&pdev->dev, sizeof(*sha_dd), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) if (!sha_dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) sha_dd->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) platform_set_drvdata(pdev, sha_dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) INIT_LIST_HEAD(&sha_dd->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) spin_lock_init(&sha_dd->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) tasklet_init(&sha_dd->done_task, atmel_sha_done_task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) (unsigned long)sha_dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) tasklet_init(&sha_dd->queue_task, atmel_sha_queue_task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) (unsigned long)sha_dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) /* Get the base address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) sha_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) if (!sha_res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) dev_err(dev, "no MEM resource info\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) goto err_tasklet_kill;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) sha_dd->phys_base = sha_res->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) /* Get the IRQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) sha_dd->irq = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) if (sha_dd->irq < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) err = sha_dd->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) goto err_tasklet_kill;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) err = devm_request_irq(&pdev->dev, sha_dd->irq, atmel_sha_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) IRQF_SHARED, "atmel-sha", sha_dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) dev_err(dev, "unable to request sha irq.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) goto err_tasklet_kill;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) /* Initializing the clock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) sha_dd->iclk = devm_clk_get(&pdev->dev, "sha_clk");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) if (IS_ERR(sha_dd->iclk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) dev_err(dev, "clock initialization failed.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) err = PTR_ERR(sha_dd->iclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) goto err_tasklet_kill;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) sha_dd->io_base = devm_ioremap_resource(&pdev->dev, sha_res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) if (IS_ERR(sha_dd->io_base)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) dev_err(dev, "can't ioremap\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) err = PTR_ERR(sha_dd->io_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) goto err_tasklet_kill;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) err = clk_prepare(sha_dd->iclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) goto err_tasklet_kill;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) err = atmel_sha_hw_version_init(sha_dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) goto err_iclk_unprepare;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) atmel_sha_get_cap(sha_dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) if (sha_dd->caps.has_dma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) err = atmel_sha_dma_init(sha_dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) goto err_iclk_unprepare;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) dev_info(dev, "using %s for DMA transfers\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) dma_chan_name(sha_dd->dma_lch_in.chan));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) spin_lock(&atmel_sha.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) list_add_tail(&sha_dd->list, &atmel_sha.dev_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) spin_unlock(&atmel_sha.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) err = atmel_sha_register_algs(sha_dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) goto err_algs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) dev_info(dev, "Atmel SHA1/SHA256%s%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) sha_dd->caps.has_sha224 ? "/SHA224" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) sha_dd->caps.has_sha_384_512 ? "/SHA384/SHA512" : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) err_algs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) spin_lock(&atmel_sha.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) list_del(&sha_dd->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) spin_unlock(&atmel_sha.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) if (sha_dd->caps.has_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) atmel_sha_dma_cleanup(sha_dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) err_iclk_unprepare:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) clk_unprepare(sha_dd->iclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) err_tasklet_kill:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) tasklet_kill(&sha_dd->queue_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) tasklet_kill(&sha_dd->done_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) static int atmel_sha_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) struct atmel_sha_dev *sha_dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) sha_dd = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) if (!sha_dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) spin_lock(&atmel_sha.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) list_del(&sha_dd->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) spin_unlock(&atmel_sha.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) atmel_sha_unregister_algs(sha_dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) tasklet_kill(&sha_dd->queue_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) tasklet_kill(&sha_dd->done_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) if (sha_dd->caps.has_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) atmel_sha_dma_cleanup(sha_dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) clk_unprepare(sha_dd->iclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) static struct platform_driver atmel_sha_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) .probe = atmel_sha_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) .remove = atmel_sha_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) .name = "atmel_sha",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) .of_match_table = of_match_ptr(atmel_sha_dt_ids),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) module_platform_driver(atmel_sha_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) MODULE_DESCRIPTION("Atmel SHA (1/256/224/384/512) hw acceleration support.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) MODULE_LICENSE("GPL v2");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");