Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 tree for the Orange Pi 5 / 5B / 5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) /* SPDX-License-Identifier: GPL-2.0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4) /* \file cc_driver.h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * ARM CryptoCell Linux Crypto Driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) #ifndef __CC_DRIVER_H__
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #define __CC_DRIVER_H__
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #ifdef COMP_IN_WQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/workqueue.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <crypto/algapi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <crypto/internal/skcipher.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include <crypto/aes.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #include <crypto/sha.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #include <crypto/aead.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) #include <crypto/authenc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) #include <crypto/hash.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) #include <crypto/skcipher.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) #include <linux/version.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) #include <linux/clk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) #include "cc_host_regs.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) #include "cc_crypto_ctx.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) #include "cc_hw_queue_defs.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) #include "cc_sram_mgr.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) extern bool cc_dump_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) extern bool cc_dump_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) #define DRV_MODULE_VERSION "5.0"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) 
/* CryptoCell hardware revisions; numeric value matches the IP product name. */
enum cc_hw_rev {
	CC_HW_REV_630 = 630,
	CC_HW_REV_710 = 710,
	CC_HW_REV_712 = 712,
	CC_HW_REV_713 = 713
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) 
/*
 * Bitmask of supported cryptographic standards bodies: the NIST suite
 * (AES/SHA/...), the Chinese OSCCA suite (SM2/SM3/SM4), or both.
 */
enum cc_std_body {
	CC_STD_NIST = 0x1,
	CC_STD_OSCCA = 0x2,
	CC_STD_ALL = 0x3	/* CC_STD_NIST | CC_STD_OSCCA */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) #define CC_COHERENT_CACHE_PARAMS 0xEEE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) #define CC_PINS_FULL	0x0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) #define CC_PINS_SLIM	0x9F
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) /* Maximum DMA mask supported by IP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) #define DMA_BIT_MASK_LEN 48
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) #define CC_AXI_IRQ_MASK ((1 << CC_AXIM_CFG_BRESPMASK_BIT_SHIFT) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) 			  (1 << CC_AXIM_CFG_RRESPMASK_BIT_SHIFT) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) 			  (1 << CC_AXIM_CFG_INFLTMASK_BIT_SHIFT) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) 			  (1 << CC_AXIM_CFG_COMPMASK_BIT_SHIFT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) #define CC_AXI_ERR_IRQ_MASK BIT(CC_HOST_IRR_AXI_ERR_INT_BIT_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) #define CC_COMP_IRQ_MASK BIT(CC_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) #define CC_SECURITY_DISABLED_MASK BIT(CC_SECURITY_DISABLED_VALUE_BIT_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) #define CC_NVM_IS_IDLE_MASK BIT(CC_NVM_IS_IDLE_VALUE_BIT_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) #define AXIM_MON_COMP_VALUE CC_GENMASK(CC_AXIM_MON_COMP_VALUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) #define CC_CPP_AES_ABORT_MASK ( \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) 	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_0_MASK_BIT_SHIFT) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_1_MASK_BIT_SHIFT) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_2_MASK_BIT_SHIFT) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_3_MASK_BIT_SHIFT) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_4_MASK_BIT_SHIFT) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_5_MASK_BIT_SHIFT) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_6_MASK_BIT_SHIFT) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_7_MASK_BIT_SHIFT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) #define CC_CPP_SM4_ABORT_MASK ( \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) 	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_0_MASK_BIT_SHIFT) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_1_MASK_BIT_SHIFT) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) 	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_2_MASK_BIT_SHIFT) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_3_MASK_BIT_SHIFT) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) 	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_4_MASK_BIT_SHIFT) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) 	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_5_MASK_BIT_SHIFT) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_6_MASK_BIT_SHIFT) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) 	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_7_MASK_BIT_SHIFT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) /* Register name mangling macro */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) #define CC_REG(reg_name) CC_ ## reg_name ## _REG_OFFSET
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) /* TEE FIPS status interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) #define CC_GPR0_IRQ_MASK BIT(CC_HOST_IRR_GPR0_BIT_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) #define CC_CRA_PRIO 400
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) #define MIN_HW_QUEUE_SIZE 50 /* Minimum size required for proper function */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) #define MAX_REQUEST_QUEUE_SIZE 4096
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) #define MAX_MLLI_BUFF_SIZE 2080
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) /* Definitions for HW descriptors DIN/DOUT fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) #define NS_BIT 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) #define AXI_ID 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) /* AXI_ID is not actually the AXI ID of the transaction but the value of AXI_ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112)  * field in the HW descriptor. The DMA engine +8 that value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 
/*
 * Per-request CPP (Crypto Protected Platform) attributes.
 * Identifies whether a request targets a protected key and, if so,
 * which algorithm and key slot it uses.
 */
struct cc_cpp_req {
	bool is_cpp;		/* true if this is a protected-key (CPP) request */
	enum cc_cpp_alg alg;	/* CPP algorithm; enum declared elsewhere (cc_crypto_ctx.h, presumably AES/SM4) — verify */
	u8 slot;		/* protected key slot index */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) #define CC_MAX_IVGEN_DMA_ADDRESSES	3
/*
 * Driver-internal context carried with each queued crypto request.
 * @user_cb/@user_arg form the completion callback invoked when the HW
 * finishes the descriptor sequence.
 */
struct cc_crypto_req {
	void (*user_cb)(struct device *dev, void *req, int err);	/* completion callback */
	void *user_arg;			/* opaque argument passed back via @user_cb */
	struct completion seq_compl; /* request completion */
	struct cc_cpp_req cpp;		/* protected-key (CPP) attributes for this request */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130)  * struct cc_drvdata - driver private data context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131)  * @cc_base:	virt address of the CC registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132)  * @irq:	bitmap indicating source of last interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133)  */
struct cc_drvdata {
	void __iomem *cc_base;		/* virt address of the CC registers */
	int irq;			/* bitmap indicating source of last interrupt */
	struct completion hw_queue_avail; /* wait for HW queue availability */
	struct platform_device *plat_dev; /* owning platform device */
	u32 mlli_sram_addr;		/* SRAM address reserved for MLLI tables */
	struct dma_pool *mlli_buffs_pool; /* DMA pool for MLLI buffers */
	struct list_head alg_list;	/* registered cc_crypto_alg entries */
	void *hash_handle;		/* opaque hash sub-module context */
	void *aead_handle;		/* opaque AEAD sub-module context */
	void *request_mgr_handle;	/* opaque request-manager context */
	void *fips_handle;		/* opaque FIPS sub-module context */
	u32 sram_free_offset;	/* offset to non-allocated area in SRAM */
	struct dentry *dir;	/* for debugfs */
	struct clk *clk;		/* CC functional clock */
	bool coherent;			/* true if DMA is cache-coherent */
	char *hw_rev_name;		/* printable HW revision string */
	enum cc_hw_rev hw_rev;		/* detected HW revision (see enum cc_hw_rev) */
	u32 axim_mon_offset;		/* AXIM monitor register offset (rev-dependent, presumably) */
	u32 sig_offset;			/* signature register offset (rev-dependent) */
	u32 ver_offset;			/* version register offset (rev-dependent) */
	int std_bodies;			/* mask of enum cc_std_body supported by this HW */
	bool sec_disabled;		/* true if security is disabled (see CC_SECURITY_DISABLED_MASK) */
	u32 comp_mask;			/* completion interrupt mask in use */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 
/*
 * A registered algorithm instance, linked into cc_drvdata::alg_list.
 * Wraps either a skcipher or an AEAD crypto-API algorithm together with
 * the HW configuration needed to program it.
 */
struct cc_crypto_alg {
	struct list_head entry;		/* node in cc_drvdata::alg_list */
	int cipher_mode;		/* HW cipher mode (ECB/CBC/..., presumably drv_cipher_mode) */
	int flow_mode; /* Note: currently, refers to the cipher mode only. */
	int auth_mode;			/* authentication mode for AEAD algs */
	struct cc_drvdata *drvdata;	/* back-pointer to owning device */
	struct skcipher_alg skcipher_alg;	/* valid when registered as skcipher */
	struct aead_alg aead_alg;		/* valid when registered as AEAD */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 
/*
 * Static template describing one algorithm the driver may register.
 * Registration is gated on @min_hw_rev, @std_body and @sec_func matching
 * the probed device's capabilities.
 */
struct cc_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];		/* crypto API algorithm name */
	char driver_name[CRYPTO_MAX_ALG_NAME];	/* crypto API driver name */
	unsigned int blocksize;			/* cipher block size in bytes */
	union {
		struct skcipher_alg skcipher;
		struct aead_alg aead;
	} template_u;				/* type-specific alg template */
	int cipher_mode;			/* HW cipher mode */
	int flow_mode; /* Note: currently, refers to the cipher mode only. */
	int auth_mode;				/* authentication mode for AEAD */
	u32 min_hw_rev;				/* minimum HW revision supporting this alg */
	enum cc_std_body std_body;		/* standards body this alg belongs to */
	bool sec_func;				/* requires the security-enabled (non-slim) HW — verify */
	unsigned int data_unit;			/* data unit size (e.g. for XTS); 0 if unused — verify */
	struct cc_drvdata *drvdata;		/* filled at registration time */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 
/*
 * Common per-request context shared by async cipher/AEAD paths:
 * the (possibly DMA-mapped) IV and the operation direction.
 */
struct async_gen_req_ctx {
	dma_addr_t iv_dma_addr;		/* DMA address of the mapped IV buffer */
	u8 *iv;				/* CPU pointer to the IV buffer */
	enum drv_crypto_direction op_type;	/* encrypt or decrypt */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) static inline struct device *drvdata_to_dev(struct cc_drvdata *drvdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 	return &drvdata->plat_dev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) void __dump_byte_array(const char *name, const u8 *buf, size_t len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) static inline void dump_byte_array(const char *name, const u8 *the_array,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 				   size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 	if (cc_dump_bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) 		__dump_byte_array(name, the_array, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) bool cc_wait_for_reset_completion(struct cc_drvdata *drvdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) void fini_cc_regs(struct cc_drvdata *drvdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) unsigned int cc_get_default_hash_len(struct cc_drvdata *drvdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) static inline void cc_iowrite(struct cc_drvdata *drvdata, u32 reg, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 	iowrite32(val, (drvdata->cc_base + reg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) static inline u32 cc_ioread(struct cc_drvdata *drvdata, u32 reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) 	return ioread32(drvdata->cc_base + reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) static inline gfp_t cc_gfp_flags(struct crypto_async_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) 	return (req->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) 			GFP_KERNEL : GFP_ATOMIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) static inline void set_queue_last_ind(struct cc_drvdata *drvdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) 				      struct cc_hw_desc *pdesc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) 	if (drvdata->hw_rev >= CC_HW_REV_712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 		set_queue_last_ind_bit(pdesc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) #endif /*__CC_DRIVER_H__*/