// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014 Imagination Technologies
 * Authors: Will Thomas, James Hartley
 *
 * Interface structure taken from omap-sham driver
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include <crypto/internal/hash.h>
#include <crypto/md5.h>
#include <crypto/sha.h>

#define CR_RESET			0
#define CR_RESET_SET			1
#define CR_RESET_UNSET			0

#define CR_MESSAGE_LENGTH_H		0x4
#define CR_MESSAGE_LENGTH_L		0x8

#define CR_CONTROL			0xc
#define CR_CONTROL_BYTE_ORDER_3210	0
#define CR_CONTROL_BYTE_ORDER_0123	1
#define CR_CONTROL_BYTE_ORDER_2310	2
#define CR_CONTROL_BYTE_ORDER_1032	3
#define CR_CONTROL_BYTE_ORDER_SHIFT	8
#define CR_CONTROL_ALGO_MD5		0
#define CR_CONTROL_ALGO_SHA1		1
#define CR_CONTROL_ALGO_SHA224		2
#define CR_CONTROL_ALGO_SHA256		3

#define CR_INTSTAT			0x10
#define CR_INTENAB			0x14
#define CR_INTCLEAR			0x18
#define CR_INT_RESULTS_AVAILABLE	BIT(0)
#define CR_INT_NEW_RESULTS_SET		BIT(1)
#define CR_INT_RESULT_READ_ERR		BIT(2)
#define CR_INT_MESSAGE_WRITE_ERROR	BIT(3)
#define CR_INT_STATUS			BIT(8)

#define CR_RESULT_QUEUE			0x1c
#define CR_RSD0				0x40
#define CR_CORE_REV			0x50
#define CR_CORE_DES1			0x60
#define CR_CORE_DES2			0x70

#define DRIVER_FLAGS_BUSY		BIT(0)
#define DRIVER_FLAGS_FINAL		BIT(1)
#define DRIVER_FLAGS_DMA_ACTIVE		BIT(2)
#define DRIVER_FLAGS_OUTPUT_READY	BIT(3)
#define DRIVER_FLAGS_INIT		BIT(4)
#define DRIVER_FLAGS_CPU		BIT(5)
#define DRIVER_FLAGS_DMA_READY		BIT(6)
#define DRIVER_FLAGS_ERROR		BIT(7)
#define DRIVER_FLAGS_SG			BIT(8)
#define DRIVER_FLAGS_SHA1		BIT(18)
#define DRIVER_FLAGS_SHA224		BIT(19)
#define DRIVER_FLAGS_SHA256		BIT(20)
#define DRIVER_FLAGS_MD5		BIT(21)

#define IMG_HASH_QUEUE_LENGTH		20
#define IMG_HASH_DMA_BURST		4
#define IMG_HASH_DMA_THRESHOLD		64

#ifdef __LITTLE_ENDIAN
#define IMG_HASH_BYTE_ORDER		CR_CONTROL_BYTE_ORDER_3210
#else
#define IMG_HASH_BYTE_ORDER		CR_CONTROL_BYTE_ORDER_0123
#endif

struct img_hash_dev;

struct img_hash_request_ctx {
	struct img_hash_dev	*hdev;
	u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
	unsigned long		flags;
	size_t			digsize;

	dma_addr_t		dma_addr;
	size_t			dma_ct;

	/* sg root */
	struct scatterlist	*sgfirst;
	/* walk state */
	struct scatterlist	*sg;
	size_t			nents;
	size_t			offset;
	unsigned int		total;
	size_t			sent;

	unsigned long		op;

	size_t			bufcnt;
	struct ahash_request	fallback_req;

	/* Zero length buffer must remain last member of struct */
	u8 buffer[] __aligned(sizeof(u32));
};

struct img_hash_ctx {
	struct img_hash_dev	*hdev;
	unsigned long		flags;
	struct crypto_ahash	*fallback;
};

struct img_hash_dev {
	struct list_head	list;
	struct device		*dev;
	struct clk		*hash_clk;
	struct clk		*sys_clk;
	void __iomem		*io_base;

	phys_addr_t		bus_addr;
	void __iomem		*cpu_addr;

	spinlock_t		lock;
	int			err;
	struct tasklet_struct	done_task;
	struct tasklet_struct	dma_task;

	unsigned long		flags;
	struct crypto_queue	queue;
	struct ahash_request	*req;

	struct dma_chan		*dma_lch;
};

struct img_hash_drv {
	struct list_head dev_list;
	spinlock_t lock;
};

static struct img_hash_drv img_hash = {
	.dev_list = LIST_HEAD_INIT(img_hash.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(img_hash.lock),
};

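/*
 * Relaxed MMIO accessors for the hash block registers. The relaxed
 * variants are used throughout; ordering against the control sequence
 * is handled explicitly by callers where it matters (see the bus-flush
 * read in img_hash_start()). Typical usage:
 *
 *	img_hash_write(hdev, CR_CONTROL, cr);
 *	reg = img_hash_read(hdev, CR_INTSTAT);
 */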
static inline u32 img_hash_read(struct img_hash_dev *hdev, u32 offset)
{
	return readl_relaxed(hdev->io_base + offset);
}

static inline void img_hash_write(struct img_hash_dev *hdev,
				  u32 offset, u32 value)
{
	writel_relaxed(value, hdev->io_base + offset);
}

static inline u32 img_hash_read_result_queue(struct img_hash_dev *hdev)
{
	return be32_to_cpu(img_hash_read(hdev, CR_RESULT_QUEUE));
}

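/*
 * Program the byte order and algorithm selection into the control
 * register and start a hash operation for the current request.
 */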
static void img_hash_start(struct img_hash_dev *hdev, bool dma)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
	u32 cr = IMG_HASH_BYTE_ORDER << CR_CONTROL_BYTE_ORDER_SHIFT;

	if (ctx->flags & DRIVER_FLAGS_MD5)
		cr |= CR_CONTROL_ALGO_MD5;
	else if (ctx->flags & DRIVER_FLAGS_SHA1)
		cr |= CR_CONTROL_ALGO_SHA1;
	else if (ctx->flags & DRIVER_FLAGS_SHA224)
		cr |= CR_CONTROL_ALGO_SHA224;
	else if (ctx->flags & DRIVER_FLAGS_SHA256)
		cr |= CR_CONTROL_ALGO_SHA256;
	dev_dbg(hdev->dev, "Starting hash process\n");
	img_hash_write(hdev, CR_CONTROL, cr);

	/*
	 * The hardware block requires two cycles between writing the control
	 * register and writing the first word of data in non-DMA mode. To
	 * ensure the first data write is not grouped in a burst with the
	 * control register write, a read is issued to 'flush' the bus.
	 */
	if (!dma)
		img_hash_read(hdev, CR_CONTROL);
}

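/*
 * Push data into the accelerator's data port with CPU writes. The
 * length is rounded up to whole 32-bit words; for the final transfer
 * this is presumably harmless because the block was programmed with
 * the exact message length in img_hash_hw_init() (see also the
 * comment in img_hash_dma_task()).
 */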
static int img_hash_xmit_cpu(struct img_hash_dev *hdev, const u8 *buf,
			     size_t length, int final)
{
	u32 count, len32;
	const u32 *buffer = (const u32 *)buf;

	dev_dbg(hdev->dev, "xmit_cpu: length: %zu bytes\n", length);

	if (final)
		hdev->flags |= DRIVER_FLAGS_FINAL;

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	for (count = 0; count < len32; count++)
		writel_relaxed(buffer[count], hdev->cpu_addr);

	return -EINPROGRESS;
}

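/*
 * DMA completion callback: flush any sub-word remainder buffered by
 * img_hash_dma_task() via the CPU port, then reschedule the DMA
 * tasklet if scatterlist entries remain.
 */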
static void img_hash_dma_callback(void *data)
{
	struct img_hash_dev *hdev = (struct img_hash_dev *)data;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	if (ctx->bufcnt) {
		img_hash_xmit_cpu(hdev, ctx->buffer, ctx->bufcnt, 0);
		ctx->bufcnt = 0;
	}
	if (ctx->sg)
		tasklet_schedule(&hdev->dma_task);
}

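/*
 * Map a single scatterlist entry and submit it to the dmaengine
 * channel. On failure, hdev->err is set and the caller falls back to
 * CPU writes.
 */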
static int img_hash_xmit_dma(struct img_hash_dev *hdev, struct scatterlist *sg)
{
	struct dma_async_tx_descriptor *desc;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	ctx->dma_ct = dma_map_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
	if (ctx->dma_ct == 0) {
		dev_err(hdev->dev, "Invalid DMA sg\n");
		hdev->err = -EINVAL;
		return -EINVAL;
	}

	desc = dmaengine_prep_slave_sg(hdev->dma_lch,
				       sg,
				       ctx->dma_ct,
				       DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(hdev->dev, "Null DMA descriptor\n");
		hdev->err = -EINVAL;
		dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
		return -EINVAL;
	}
	desc->callback = img_hash_dma_callback;
	desc->callback_param = hdev;
	dmaengine_submit(desc);
	dma_async_issue_pending(hdev->dma_lch);

	return 0;
}

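/*
 * One-shot CPU path: copy the whole request into the context bounce
 * buffer and write it out in one pass. Used only for requests smaller
 * than IMG_HASH_DMA_THRESHOLD, which also bounds the buffer space
 * reserved in img_hash_cra_init().
 */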
static int img_hash_write_via_cpu(struct img_hash_dev *hdev)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	ctx->bufcnt = sg_copy_to_buffer(hdev->req->src, sg_nents(ctx->sg),
					ctx->buffer, hdev->req->nbytes);

	ctx->total = hdev->req->nbytes;
	ctx->bufcnt = 0;

	hdev->flags |= (DRIVER_FLAGS_CPU | DRIVER_FLAGS_FINAL);

	img_hash_start(hdev, false);

	return img_hash_xmit_cpu(hdev, ctx->buffer, ctx->total, 1);
}

static int img_hash_finish(struct ahash_request *req)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);

	if (!req->result)
		return -EINVAL;

	memcpy(req->result, ctx->digest, ctx->digsize);

	return 0;
}

static void img_hash_copy_hash(struct ahash_request *req)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	u32 *hash = (u32 *)ctx->digest;
	int i;

	for (i = (ctx->digsize / sizeof(u32)) - 1; i >= 0; i--)
		hash[i] = img_hash_read_result_queue(ctx->hdev);
}

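/*
 * Common completion path: on success read back the digest, then clear
 * the transient driver flags and complete the crypto request.
 */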
static void img_hash_finish_req(struct ahash_request *req, int err)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	struct img_hash_dev *hdev = ctx->hdev;

	if (!err) {
		img_hash_copy_hash(req);
		if (DRIVER_FLAGS_FINAL & hdev->flags)
			err = img_hash_finish(req);
	} else {
		dev_warn(hdev->dev, "Hash failed with error %d\n", err);
		ctx->flags |= DRIVER_FLAGS_ERROR;
	}

	hdev->flags &= ~(DRIVER_FLAGS_DMA_READY | DRIVER_FLAGS_OUTPUT_READY |
		DRIVER_FLAGS_CPU | DRIVER_FLAGS_BUSY | DRIVER_FLAGS_FINAL);

	if (req->base.complete)
		req->base.complete(&req->base, err);
}

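/*
 * Start the DMA path. The scatterlist walk itself happens in
 * img_hash_dma_task(); this only starts the hardware and schedules
 * the tasklet.
 */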
static int img_hash_write_via_dma(struct img_hash_dev *hdev)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	img_hash_start(hdev, true);

	dev_dbg(hdev->dev, "xmit dma size: %d\n", ctx->total);

	if (!ctx->total)
		hdev->flags |= DRIVER_FLAGS_FINAL;

	hdev->flags |= DRIVER_FLAGS_DMA_ACTIVE | DRIVER_FLAGS_FINAL;

	tasklet_schedule(&hdev->dma_task);

	return -EINPROGRESS;
}

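/*
 * Acquire and configure the "tx" slave DMA channel: destination is the
 * block's data port (hdev->bus_addr), 32-bit wide, with bursts of
 * IMG_HASH_DMA_BURST words.
 */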
static int img_hash_dma_init(struct img_hash_dev *hdev)
{
	struct dma_slave_config dma_conf;
	int err;

	hdev->dma_lch = dma_request_chan(hdev->dev, "tx");
	if (IS_ERR(hdev->dma_lch)) {
		dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
		return PTR_ERR(hdev->dma_lch);
	}
	dma_conf.direction = DMA_MEM_TO_DEV;
	dma_conf.dst_addr = hdev->bus_addr;
	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.dst_maxburst = IMG_HASH_DMA_BURST;
	dma_conf.device_fc = false;

	err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
	if (err) {
		dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
		dma_release_channel(hdev->dma_lch);
		return err;
	}

	return 0;
}

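/*
 * Tasklet that walks the request scatterlist, sending whole 32-bit
 * words by DMA and carrying any sub-word tail over to the next
 * transfer; see the comment in the body for why.
 */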
static void img_hash_dma_task(unsigned long d)
{
	struct img_hash_dev *hdev = (struct img_hash_dev *)d;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
	u8 *addr;
	size_t nbytes, bleft, wsend, len, tbc;
	struct scatterlist tsg;

	if (!hdev->req || !ctx->sg)
		return;

	addr = sg_virt(ctx->sg);
	nbytes = ctx->sg->length - ctx->offset;

	/*
	 * The hash accelerator does not support a data valid mask. This means
	 * that if each dma (i.e. per page) is not a multiple of 4 bytes, the
	 * padding bytes in the last word written by that dma would erroneously
	 * be included in the hash. To avoid this we round down the transfer,
	 * and add the excess to the start of the next dma. It does not matter
	 * that the final dma may not be a multiple of 4 bytes as the hashing
	 * block is programmed to accept the correct number of bytes.
	 */

	bleft = nbytes % 4;
	wsend = (nbytes / 4);

	if (wsend) {
		sg_init_one(&tsg, addr + ctx->offset, wsend * 4);
		if (img_hash_xmit_dma(hdev, &tsg)) {
			dev_err(hdev->dev, "DMA failed, falling back to CPU\n");
			ctx->flags |= DRIVER_FLAGS_CPU;
			hdev->err = 0;
			img_hash_xmit_cpu(hdev, addr + ctx->offset,
					  wsend * 4, 0);
			ctx->sent += wsend * 4;
			wsend = 0;
		} else {
			ctx->sent += wsend * 4;
		}
	}

	if (bleft) {
		ctx->bufcnt = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
						 ctx->buffer, bleft, ctx->sent);
		tbc = 0;
		ctx->sg = sg_next(ctx->sg);
		while (ctx->sg && (ctx->bufcnt < 4)) {
			len = ctx->sg->length;
			if (likely(len > (4 - ctx->bufcnt)))
				len = 4 - ctx->bufcnt;
			tbc = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
						 ctx->buffer + ctx->bufcnt, len,
						 ctx->sent + ctx->bufcnt);
			ctx->bufcnt += tbc;
			if (tbc >= ctx->sg->length) {
				ctx->sg = sg_next(ctx->sg);
				tbc = 0;
			}
		}

		ctx->sent += ctx->bufcnt;
		ctx->offset = tbc;

		if (!wsend)
			img_hash_dma_callback(hdev);
	} else {
		ctx->offset = 0;
		ctx->sg = sg_next(ctx->sg);
	}
}

static int img_hash_write_via_dma_stop(struct img_hash_dev *hdev)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	if (ctx->flags & DRIVER_FLAGS_SG)
		dma_unmap_sg(hdev->dev, ctx->sg, ctx->dma_ct, DMA_TO_DEVICE);

	return 0;
}

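/* Dispatch to the DMA or CPU write path based on the request size. */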
static int img_hash_process_data(struct img_hash_dev *hdev)
{
	struct ahash_request *req = hdev->req;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	int err = 0;

	ctx->bufcnt = 0;

	if (req->nbytes >= IMG_HASH_DMA_THRESHOLD) {
		dev_dbg(hdev->dev, "process data request(%d bytes) using DMA\n",
			req->nbytes);
		err = img_hash_write_via_dma(hdev);
	} else {
		dev_dbg(hdev->dev, "process data request(%d bytes) using CPU\n",
			req->nbytes);
		err = img_hash_write_via_cpu(hdev);
	}
	return err;
}

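/*
 * Reset the block, enable the "new results" interrupt and program the
 * total message length in bits across the two length registers.
 */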
static int img_hash_hw_init(struct img_hash_dev *hdev)
{
	unsigned long long nbits;
	u32 u, l;

	img_hash_write(hdev, CR_RESET, CR_RESET_SET);
	img_hash_write(hdev, CR_RESET, CR_RESET_UNSET);
	img_hash_write(hdev, CR_INTENAB, CR_INT_NEW_RESULTS_SET);

	nbits = (u64)hdev->req->nbytes << 3;
	u = nbits >> 32;
	l = nbits;
	img_hash_write(hdev, CR_MESSAGE_LENGTH_H, u);
	img_hash_write(hdev, CR_MESSAGE_LENGTH_L, l);

	if (!(DRIVER_FLAGS_INIT & hdev->flags)) {
		hdev->flags |= DRIVER_FLAGS_INIT;
		hdev->err = 0;
	}
	dev_dbg(hdev->dev, "hw initialized, nbits: %llx\n", nbits);
	return 0;
}

static int img_hash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}

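/*
 * Enqueue a request and, if the device is idle, dequeue and start the
 * next one. Returns the enqueue status; the dequeued request completes
 * asynchronously through img_hash_finish_req().
 */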
static int img_hash_handle_queue(struct img_hash_dev *hdev,
				 struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct img_hash_request_ctx *ctx;
	unsigned long flags;
	int err = 0, res = 0;

	spin_lock_irqsave(&hdev->lock, flags);

	if (req)
		res = ahash_enqueue_request(&hdev->queue, req);

	if (DRIVER_FLAGS_BUSY & hdev->flags) {
		spin_unlock_irqrestore(&hdev->lock, flags);
		return res;
	}

	backlog = crypto_get_backlog(&hdev->queue);
	async_req = crypto_dequeue_request(&hdev->queue);
	if (async_req)
		hdev->flags |= DRIVER_FLAGS_BUSY;

	spin_unlock_irqrestore(&hdev->lock, flags);

	if (!async_req)
		return res;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	hdev->req = req;

	ctx = ahash_request_ctx(req);

	dev_info(hdev->dev, "processing req, op: %lu, bytes: %d\n",
		 ctx->op, req->nbytes);

	err = img_hash_hw_init(hdev);

	if (!err)
		err = img_hash_process_data(hdev);

	if (err != -EINPROGRESS) {
		/* done_task will not finish so do it here */
		img_hash_finish_req(req, err);
	}
	return res;
}

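/*
 * Only img_hash_digest() drives the accelerator; init/update/final/
 * finup and import/export below are delegated verbatim to the software
 * fallback transform.
 */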
static int img_hash_update(struct ahash_request *req)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;

	return crypto_ahash_update(&rctx->fallback_req);
}

static int img_hash_final(struct ahash_request *req)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_final(&rctx->fallback_req);
}

static int img_hash_finup(struct ahash_request *req)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_finup(&rctx->fallback_req);
}

static int img_hash_import(struct ahash_request *req, const void *in)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_import(&rctx->fallback_req, in);
}

static int img_hash_export(struct ahash_request *req, void *out)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_export(&rctx->fallback_req, out);
}

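/*
 * One-shot digest on the accelerator. The algorithm flag is derived
 * from the digest size alone, which works because the MD5, SHA-1,
 * SHA-224 and SHA-256 digest sizes are all distinct.
 */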
static int img_hash_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *tctx = crypto_ahash_ctx(tfm);
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	struct img_hash_dev *hdev = NULL;
	struct img_hash_dev *tmp;
	int err;

	spin_lock(&img_hash.lock);
	if (!tctx->hdev) {
		list_for_each_entry(tmp, &img_hash.dev_list, list) {
			hdev = tmp;
			break;
		}
		tctx->hdev = hdev;
	} else {
		hdev = tctx->hdev;
	}
	spin_unlock(&img_hash.lock);

	ctx->hdev = hdev;
	ctx->flags = 0;
	ctx->digsize = crypto_ahash_digestsize(tfm);

	switch (ctx->digsize) {
	case SHA1_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_SHA1;
		break;
	case SHA256_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_SHA256;
		break;
	case SHA224_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_SHA224;
		break;
	case MD5_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_MD5;
		break;
	default:
		return -EINVAL;
	}

	ctx->bufcnt = 0;
	ctx->offset = 0;
	ctx->sent = 0;
	ctx->total = req->nbytes;
	ctx->sg = req->src;
	ctx->sgfirst = req->src;
	ctx->nents = sg_nents(ctx->sg);

	err = img_hash_handle_queue(tctx->hdev, req);

	return err;
}

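/*
 * Allocate the software fallback and size the request context to hold
 * the fallback's request plus up to IMG_HASH_DMA_THRESHOLD bytes for
 * the trailing buffer[] member.
 */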
static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name)
{
	struct img_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	int err = -ENOMEM;

	ctx->fallback = crypto_alloc_ahash(alg_name, 0,
					   CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback)) {
		pr_err("img_hash: Could not load fallback driver.\n");
		err = PTR_ERR(ctx->fallback);
		goto err;
	}
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct img_hash_request_ctx) +
				 crypto_ahash_reqsize(ctx->fallback) +
				 IMG_HASH_DMA_THRESHOLD);

	return 0;

err:
	return err;
}

static int img_hash_cra_md5_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "md5-generic");
}

static int img_hash_cra_sha1_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "sha1-generic");
}

static int img_hash_cra_sha224_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "sha224-generic");
}

static int img_hash_cra_sha256_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "sha256-generic");
}

static void img_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct img_hash_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_ahash(tctx->fallback);
}

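/*
 * Interrupt handler: a results-ready interrupt schedules the done
 * tasklet; the remaining status bits indicate programming errors and
 * are only logged.
 */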
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) static irqreturn_t img_irq_handler(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) struct img_hash_dev *hdev = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) reg = img_hash_read(hdev, CR_INTSTAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) img_hash_write(hdev, CR_INTCLEAR, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) if (reg & CR_INT_NEW_RESULTS_SET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) dev_dbg(hdev->dev, "IRQ CR_INT_NEW_RESULTS_SET\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) if (DRIVER_FLAGS_BUSY & hdev->flags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) hdev->flags |= DRIVER_FLAGS_OUTPUT_READY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) if (!(DRIVER_FLAGS_CPU & hdev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) hdev->flags |= DRIVER_FLAGS_DMA_READY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) tasklet_schedule(&hdev->done_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) dev_warn(hdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) "HASH interrupt when no active requests.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) } else if (reg & CR_INT_RESULTS_AVAILABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) dev_warn(hdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) "IRQ triggered before the hash had completed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) } else if (reg & CR_INT_RESULT_READ_ERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) dev_warn(hdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) "Attempt to read from an empty result queue\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) } else if (reg & CR_INT_MESSAGE_WRITE_ERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) dev_warn(hdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) "Data written before the hardware was configured\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) static struct ahash_alg img_algs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) .init = img_hash_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) .update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct md5_state),
			.base = {
				.cra_name = "md5",
				.cra_driver_name = "img-md5",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_md5_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct sha1_state),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "img-sha1",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_sha1_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct sha256_state),
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "img-sha224",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_sha224_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct sha256_state),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "img-sha256",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_sha256_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	}
};

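/*
 * Register every entry of img_algs[] with the crypto API; if one
 * registration fails, unregister the entries that already succeeded
 * before returning the error.
 */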
static int img_register_algs(struct img_hash_dev *hdev)
{
	int i, err;

	for (i = 0; i < ARRAY_SIZE(img_algs); i++) {
		err = crypto_register_ahash(&img_algs[i]);
		if (err)
			goto err_reg;
	}
	return 0;

err_reg:
	for (; i--; )
		crypto_unregister_ahash(&img_algs[i]);

	return err;
}

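/* Remove every algorithm registered by img_register_algs(). */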
static int img_unregister_algs(struct img_hash_dev *hdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(img_algs); i++)
		crypto_unregister_ahash(&img_algs[i]);
	return 0;
}

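/*
 * Deferred-completion tasklet: finishes the current request once the
 * hardware has produced a result, or kicks the queue to start the next
 * request when the engine is no longer busy.
 */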
static void img_hash_done_task(unsigned long data)
{
	struct img_hash_dev *hdev = (struct img_hash_dev *)data;
	int err = 0;

	if (hdev->err == -EINVAL) {
		err = hdev->err;
		goto finish;
	}

	if (!(DRIVER_FLAGS_BUSY & hdev->flags)) {
		img_hash_handle_queue(hdev, NULL);
		return;
	}

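	/* Completion is flagged differently for PIO (CPU) and DMA transfers. */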
	if (DRIVER_FLAGS_CPU & hdev->flags) {
		if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
			hdev->flags &= ~DRIVER_FLAGS_OUTPUT_READY;
			goto finish;
		}
	} else if (DRIVER_FLAGS_DMA_READY & hdev->flags) {
		if (DRIVER_FLAGS_DMA_ACTIVE & hdev->flags) {
			hdev->flags &= ~DRIVER_FLAGS_DMA_ACTIVE;
			img_hash_write_via_dma_stop(hdev);
			if (hdev->err) {
				err = hdev->err;
				goto finish;
			}
		}
		if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
			hdev->flags &= ~(DRIVER_FLAGS_DMA_READY |
					DRIVER_FLAGS_OUTPUT_READY);
			goto finish;
		}
	}
	return;

finish:
	img_hash_finish_req(hdev->req, err);
}

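/* Bind the driver to device-tree nodes compatible with "img,hash-accelerator". */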
static const struct of_device_id img_hash_match[] = {
	{ .compatible = "img,hash-accelerator" },
	{}
};
MODULE_DEVICE_TABLE(of, img_hash_match);

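/*
 * Probe: map the register bank and the write port, request the IRQ,
 * bring up the "hash" and "sys" clocks and the DMA channel, then
 * register the supported algorithms with the crypto API.
 */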
static int img_hash_probe(struct platform_device *pdev)
{
	struct img_hash_dev *hdev;
	struct device *dev = &pdev->dev;
	struct resource *hash_res;
	int irq;
	int err;

	hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
	if (hdev == NULL)
		return -ENOMEM;

	spin_lock_init(&hdev->lock);

	hdev->dev = dev;

	platform_set_drvdata(pdev, hdev);

	INIT_LIST_HEAD(&hdev->list);

	tasklet_init(&hdev->done_task, img_hash_done_task, (unsigned long)hdev);
	tasklet_init(&hdev->dma_task, img_hash_dma_task, (unsigned long)hdev);

	crypto_init_queue(&hdev->queue, IMG_HASH_QUEUE_LENGTH);

	/* Register bank */
	hdev->io_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(hdev->io_base)) {
		err = PTR_ERR(hdev->io_base);
		dev_err(dev, "can't ioremap, returned %d\n", err);

		goto res_err;
	}

	/* Write port (DMA or CPU) */
	hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	hdev->cpu_addr = devm_ioremap_resource(dev, hash_res);
	if (IS_ERR(hdev->cpu_addr)) {
		dev_err(dev, "can't ioremap write port\n");
		err = PTR_ERR(hdev->cpu_addr);
		goto res_err;
	}
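	/* Physical (bus) address of the write port, used for DMA transfers. */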
	hdev->bus_addr = hash_res->start;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = irq;
		goto res_err;
	}

	err = devm_request_irq(dev, irq, img_irq_handler, 0,
			       dev_name(dev), hdev);
	if (err) {
		dev_err(dev, "unable to request irq\n");
		goto res_err;
	}
	dev_dbg(dev, "using IRQ channel %d\n", irq);

	hdev->hash_clk = devm_clk_get(&pdev->dev, "hash");
	if (IS_ERR(hdev->hash_clk)) {
		dev_err(dev, "hash clock initialization failed\n");
		err = PTR_ERR(hdev->hash_clk);
		goto res_err;
	}

	hdev->sys_clk = devm_clk_get(&pdev->dev, "sys");
	if (IS_ERR(hdev->sys_clk)) {
		dev_err(dev, "sys clock initialization failed\n");
		err = PTR_ERR(hdev->sys_clk);
		goto res_err;
	}

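	/* Both clocks must be running before the first hardware access. */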
	err = clk_prepare_enable(hdev->hash_clk);
	if (err)
		goto res_err;

	err = clk_prepare_enable(hdev->sys_clk);
	if (err)
		goto clk_err;

	err = img_hash_dma_init(hdev);
	if (err)
		goto dma_err;

	dev_dbg(dev, "using %s for DMA transfers\n",
		dma_chan_name(hdev->dma_lch));

	spin_lock(&img_hash.lock);
	list_add_tail(&hdev->list, &img_hash.dev_list);
	spin_unlock(&img_hash.lock);

	err = img_register_algs(hdev);
	if (err)
		goto err_algs;
	dev_info(dev, "Img MD5/SHA1/SHA224/SHA256 hardware accelerator initialized\n");

	return 0;

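/* Error unwind: release resources in the reverse order of acquisition. */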
err_algs:
	spin_lock(&img_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&img_hash.lock);
	dma_release_channel(hdev->dma_lch);
dma_err:
	clk_disable_unprepare(hdev->sys_clk);
clk_err:
	clk_disable_unprepare(hdev->hash_clk);
res_err:
	tasklet_kill(&hdev->done_task);
	tasklet_kill(&hdev->dma_task);

	return err;
}

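/*
 * Teardown mirrors probe: unlink the device, unregister the algorithms,
 * stop the tasklets, and release the DMA channel and clocks.
 */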
static int img_hash_remove(struct platform_device *pdev)
{
	struct img_hash_dev *hdev;

	hdev = platform_get_drvdata(pdev);
	spin_lock(&img_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&img_hash.lock);

	img_unregister_algs(hdev);

	tasklet_kill(&hdev->done_task);
	tasklet_kill(&hdev->dma_task);

	dma_release_channel(hdev->dma_lch);

	clk_disable_unprepare(hdev->hash_clk);
	clk_disable_unprepare(hdev->sys_clk);

	return 0;
}

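/* System sleep support: suspend gates both clocks, resume re-enables them. */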
#ifdef CONFIG_PM_SLEEP
static int img_hash_suspend(struct device *dev)
{
	struct img_hash_dev *hdev = dev_get_drvdata(dev);

	clk_disable_unprepare(hdev->hash_clk);
	clk_disable_unprepare(hdev->sys_clk);

	return 0;
}

static int img_hash_resume(struct device *dev)
{
	struct img_hash_dev *hdev = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(hdev->hash_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(hdev->sys_clk);
	if (ret) {
		clk_disable_unprepare(hdev->hash_clk);
		return ret;
	}

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops img_hash_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(img_hash_suspend, img_hash_resume)
};

static struct platform_driver img_hash_driver = {
	.probe = img_hash_probe,
	.remove = img_hash_remove,
	.driver = {
		.name = "img-hash-accelerator",
		.pm = &img_hash_pm_ops,
		.of_match_table = of_match_ptr(img_hash_match),
	}
};
module_platform_driver(img_hash_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Imgtec SHA1/224/256 & MD5 hw accelerator driver");
MODULE_AUTHOR("Will Thomas");
MODULE_AUTHOR("James Hartley <james.hartley@imgtec.com>");