// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>

#include "cipher.h"
#include "common.h"
#include "core.h"
#include "regs-v5.h"
#include "sha.h"

static inline u32 qce_read(struct qce_device *qce, u32 offset)
{
	return readl(qce->base + offset);
}

static inline void qce_write(struct qce_device *qce, u32 offset, u32 val)
{
	writel(val, qce->base + offset);
}

static inline void qce_write_array(struct qce_device *qce, u32 offset,
				   const u32 *val, unsigned int len)
{
	int i;

	for (i = 0; i < len; i++)
		qce_write(qce, offset + i * sizeof(u32), val[i]);
}

static inline void
qce_clear_array(struct qce_device *qce, u32 offset, unsigned int len)
{
	int i;

	for (i = 0; i < len; i++)
		qce_write(qce, offset + i * sizeof(u32), 0);
}

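/*
 * Build the CONFIG register value: the request size in bus beats
 * (burst_size is in bytes, one beat is 8 bytes), all engine interrupts
 * masked (completion is signalled through the DMA engine instead), the
 * pipe pair to use and, optionally, little-endian data mode.
 */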
static u32 qce_config_reg(struct qce_device *qce, int little)
{
	u32 beats = (qce->burst_size >> 3) - 1;
	u32 pipe_pair = qce->pipe_pair_id;
	u32 config;

	config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK;
	config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) |
		  BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT);
	config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK;
	/* clear the active-low high-speed enable bit, i.e. enable high speed */
	config &= ~BIT(HIGH_SPD_EN_N_SHIFT);

	if (little)
		config |= BIT(LITTLE_ENDIAN_MODE_SHIFT);

	return config;
}

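/* Copy a byte stream into an array of big-endian 32-bit words. */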
void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len)
{
	__be32 *d = dst;
	const u8 *s = src;
	unsigned int n;

	n = len / sizeof(u32);
	for (; n > 0; n--) {
		*d = cpu_to_be32p((const __u32 *)s);
		s += sizeof(__u32);
		d++;
	}
}

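/* Put the engine into a known state before programming a request. */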
static void qce_setup_config(struct qce_device *qce)
{
	u32 config;

	/* get big endianness */
	config = qce_config_reg(qce, 0);

	/* clear status */
	qce_write(qce, REG_STATUS, 0);
	qce_write(qce, REG_CONFIG, config);
}

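/* Kick off the programmed operation and request a result dump. */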
static inline void qce_crypto_go(struct qce_device *qce)
{
	qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
}

#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
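/*
 * Compose the AUTH_SEG_CFG value (algorithm, digest size, mode and
 * position) from the request flags and, for the AES MACs, the key size.
 */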
static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
{
	u32 cfg = 0;

	if (IS_AES(flags) && (IS_CCM(flags) || IS_CMAC(flags)))
		cfg |= AUTH_ALG_AES << AUTH_ALG_SHIFT;
	else
		cfg |= AUTH_ALG_SHA << AUTH_ALG_SHIFT;

	if (IS_CCM(flags) || IS_CMAC(flags)) {
		if (key_size == AES_KEYSIZE_128)
			cfg |= AUTH_KEY_SZ_AES128 << AUTH_KEY_SIZE_SHIFT;
		else if (key_size == AES_KEYSIZE_256)
			cfg |= AUTH_KEY_SZ_AES256 << AUTH_KEY_SIZE_SHIFT;
	}

	if (IS_SHA1(flags) || IS_SHA1_HMAC(flags))
		cfg |= AUTH_SIZE_SHA1 << AUTH_SIZE_SHIFT;
	else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags))
		cfg |= AUTH_SIZE_SHA256 << AUTH_SIZE_SHIFT;
	else if (IS_CMAC(flags))
		cfg |= AUTH_SIZE_ENUM_16_BYTES << AUTH_SIZE_SHIFT;

	if (IS_SHA1(flags) || IS_SHA256(flags))
		cfg |= AUTH_MODE_HASH << AUTH_MODE_SHIFT;
	else if (IS_SHA1_HMAC(flags) || IS_SHA256_HMAC(flags) ||
		 IS_CBC(flags) || IS_CTR(flags))
		cfg |= AUTH_MODE_HMAC << AUTH_MODE_SHIFT;
	else if (IS_AES(flags) && IS_CCM(flags))
		cfg |= AUTH_MODE_CCM << AUTH_MODE_SHIFT;
	else if (IS_AES(flags) && IS_CMAC(flags))
		cfg |= AUTH_MODE_CMAC << AUTH_MODE_SHIFT;

	if (IS_SHA(flags) || IS_SHA_HMAC(flags))
		cfg |= AUTH_POS_BEFORE << AUTH_POS_SHIFT;

	if (IS_CCM(flags))
		cfg |= QCE_MAX_NONCE_WORDS << AUTH_NONCE_NUM_WORDS_SHIFT;

	if (IS_CBC(flags) || IS_CTR(flags) || IS_CCM(flags) ||
	    IS_CMAC(flags))
		cfg |= BIT(AUTH_LAST_SHIFT) | BIT(AUTH_FIRST_SHIFT);

	return cfg;
}

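/* Program the engine registers for a SHA1/SHA256, HMAC or CMAC request. */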
static int qce_setup_regs_ahash(struct crypto_async_request *async_req,
				u32 totallen, u32 offset)
{
	struct ahash_request *req = ahash_request_cast(async_req);
	struct crypto_ahash *ahash = __crypto_ahash_cast(async_req->tfm);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize = crypto_tfm_alg_blocksize(async_req->tfm);
	__be32 auth[SHA256_DIGEST_SIZE / sizeof(__be32)] = {0};
	__be32 mackey[QCE_SHA_HMAC_KEY_SIZE / sizeof(__be32)] = {0};
	u32 auth_cfg = 0, config;
	unsigned int iv_words;

	/* if not the last, the size has to be on the block boundary */
	if (!rctx->last_blk && req->nbytes % blocksize)
		return -EINVAL;

	qce_setup_config(qce);

	if (IS_CMAC(rctx->flags)) {
		qce_write(qce, REG_AUTH_SEG_CFG, 0);
		qce_write(qce, REG_ENCR_SEG_CFG, 0);
		qce_write(qce, REG_ENCR_SEG_SIZE, 0);
		qce_clear_array(qce, REG_AUTH_IV0, 16);
		qce_clear_array(qce, REG_AUTH_KEY0, 16);
		qce_clear_array(qce, REG_AUTH_BYTECNT0, 4);

		auth_cfg = qce_auth_cfg(rctx->flags, rctx->authklen);
	}

	if (IS_SHA_HMAC(rctx->flags) || IS_CMAC(rctx->flags)) {
		u32 authkey_words = rctx->authklen / sizeof(u32);

		qce_cpu_to_be32p_array(mackey, rctx->authkey, rctx->authklen);
		qce_write_array(qce, REG_AUTH_KEY0, (u32 *)mackey,
				authkey_words);
	}

	if (IS_CMAC(rctx->flags))
		goto go_proc;

	if (rctx->first_blk)
		memcpy(auth, rctx->digest, digestsize);
	else
		qce_cpu_to_be32p_array(auth, rctx->digest, digestsize);

	iv_words = (IS_SHA1(rctx->flags) || IS_SHA1_HMAC(rctx->flags)) ? 5 : 8;
	qce_write_array(qce, REG_AUTH_IV0, (u32 *)auth, iv_words);

	if (rctx->first_blk)
		qce_clear_array(qce, REG_AUTH_BYTECNT0, 4);
	else
		qce_write_array(qce, REG_AUTH_BYTECNT0,
				(u32 *)rctx->byte_count, 2);

	auth_cfg = qce_auth_cfg(rctx->flags, 0);

	if (rctx->last_blk)
		auth_cfg |= BIT(AUTH_LAST_SHIFT);
	else
		auth_cfg &= ~BIT(AUTH_LAST_SHIFT);

	if (rctx->first_blk)
		auth_cfg |= BIT(AUTH_FIRST_SHIFT);
	else
		auth_cfg &= ~BIT(AUTH_FIRST_SHIFT);

go_proc:
	qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg);
	qce_write(qce, REG_AUTH_SEG_SIZE, req->nbytes);
	qce_write(qce, REG_AUTH_SEG_START, 0);
	qce_write(qce, REG_ENCR_SEG_CFG, 0);
	qce_write(qce, REG_SEG_SIZE, req->nbytes);

	/* get little endianness */
	config = qce_config_reg(qce, 1);
	qce_write(qce, REG_CONFIG, config);

	qce_crypto_go(qce);

	return 0;
}
#endif

#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
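/*
 * Compose the ENCR_SEG_CFG value (cipher algorithm, key size and block
 * mode) from the request flags; returns ~0 for an unknown mode.
 */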
static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
{
	u32 cfg = 0;

	if (IS_AES(flags)) {
		if (aes_key_size == AES_KEYSIZE_128)
			cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT;
		else if (aes_key_size == AES_KEYSIZE_256)
			cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT;
	}

	if (IS_AES(flags))
		cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT;
	else if (IS_DES(flags) || IS_3DES(flags))
		cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT;

	if (IS_DES(flags))
		cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT;

	if (IS_3DES(flags))
		cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT;

	switch (flags & QCE_MODE_MASK) {
	case QCE_MODE_ECB:
		cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT;
		break;
	case QCE_MODE_CBC:
		cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT;
		break;
	case QCE_MODE_CTR:
		cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT;
		break;
	case QCE_MODE_XTS:
		cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT;
		break;
	case QCE_MODE_CCM:
		cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT;
		cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT;
		break;
	default:
		return ~0;
	}

	return cfg;
}

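/*
 * Byte-reverse the IV into the tail of a zero-padded 16-byte buffer
 * before the big-endian conversion; this appears to be the tweak
 * layout the engine expects for XTS.
 */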
static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
{
	u8 swap[QCE_AES_IV_LENGTH];
	u32 i, j;

	if (ivsize > QCE_AES_IV_LENGTH)
		return;

	memset(swap, 0, QCE_AES_IV_LENGTH);

	for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1;
	     i < QCE_AES_IV_LENGTH; i++, j--)
		swap[i] = src[j];

	qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH);
}

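/*
 * Program the second half of the XTS key (the tweak key) and the
 * data-unit size, capped at one 512-byte sector.
 */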
static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
		       unsigned int enckeylen, unsigned int cryptlen)
{
	u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
	unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
	unsigned int xtsdusize;

	qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
			       enckeylen / 2);
	qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);

	/* xts du size 512B */
	xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen);
	qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize);
}

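/* Program the engine registers for a skcipher (AES/DES/3DES) request. */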
static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
				   u32 totallen, u32 offset)
{
	struct skcipher_request *req = skcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
	struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
	struct qce_device *qce = tmpl->qce;
	__be32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(__be32)] = {0};
	__be32 enciv[QCE_MAX_IV_SIZE / sizeof(__be32)] = {0};
	unsigned int enckey_words, enciv_words;
	unsigned int keylen;
	u32 encr_cfg = 0, auth_cfg = 0, config;
	unsigned int ivsize = rctx->ivsize;
	unsigned long flags = rctx->flags;

	qce_setup_config(qce);

	if (IS_XTS(flags))
		keylen = ctx->enc_keylen / 2;
	else
		keylen = ctx->enc_keylen;

	qce_cpu_to_be32p_array(enckey, ctx->enc_key, keylen);
	enckey_words = keylen / sizeof(u32);

	qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg);

	encr_cfg = qce_encr_cfg(flags, keylen);

	if (IS_DES(flags)) {
		enciv_words = 2;
		enckey_words = 2;
	} else if (IS_3DES(flags)) {
		enciv_words = 2;
		enckey_words = 6;
	} else if (IS_AES(flags)) {
		if (IS_XTS(flags))
			qce_xtskey(qce, ctx->enc_key, ctx->enc_keylen,
				   rctx->cryptlen);
		enciv_words = 4;
	} else {
		return -EINVAL;
	}

	qce_write_array(qce, REG_ENCR_KEY0, (u32 *)enckey, enckey_words);

	if (!IS_ECB(flags)) {
		if (IS_XTS(flags))
			qce_xts_swapiv(enciv, rctx->iv, ivsize);
		else
			qce_cpu_to_be32p_array(enciv, rctx->iv, ivsize);

		qce_write_array(qce, REG_CNTR0_IV0, (u32 *)enciv, enciv_words);
	}

	if (IS_ENCRYPT(flags))
		encr_cfg |= BIT(ENCODE_SHIFT);

	qce_write(qce, REG_ENCR_SEG_CFG, encr_cfg);
	qce_write(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen);
	qce_write(qce, REG_ENCR_SEG_START, offset & 0xffff);

	if (IS_CTR(flags)) {
		qce_write(qce, REG_CNTR_MASK, ~0);
		qce_write(qce, REG_CNTR_MASK0, ~0);
		qce_write(qce, REG_CNTR_MASK1, ~0);
		qce_write(qce, REG_CNTR_MASK2, ~0);
	}

	qce_write(qce, REG_SEG_SIZE, totallen);

	/* get little endianness */
	config = qce_config_reg(qce, 1);
	qce_write(qce, REG_CONFIG, config);

	qce_crypto_go(qce);

	return 0;
}
#endif

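/*
 * Common entry point used by the algorithm front-ends, presumably once
 * the DMA transfers for the request have been prepared: program the
 * engine according to the request type.
 */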
int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
	      u32 offset)
{
	switch (type) {
#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
	case CRYPTO_ALG_TYPE_SKCIPHER:
		return qce_setup_regs_skcipher(async_req, totallen, offset);
#endif
#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
	case CRYPTO_ALG_TYPE_AHASH:
		return qce_setup_regs_ahash(async_req, totallen, offset);
#endif
	default:
		return -EINVAL;
	}
}

#define STATUS_ERRORS	\
		(BIT(SW_ERR_SHIFT) | BIT(AXI_ERR_SHIFT) | BIT(HSD_ERR_SHIFT))

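/* Read the STATUS register and check for errors or an incomplete operation. */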
int qce_check_status(struct qce_device *qce, u32 *status)
{
	int ret = 0;

	*status = qce_read(qce, REG_STATUS);

	/*
	 * Don't use the result dump status: the operation may not be
	 * complete. Use the status just read from the device instead. If
	 * result_status from the result dump is ever needed, it must be
	 * byte-swapped, since the device is set to little endian.
	 */
	if (*status & STATUS_ERRORS || !(*status & BIT(OPERATION_DONE_SHIFT)))
		ret = -ENXIO;

	return ret;
}

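/* Decode the major/minor/step fields from the core VERSION register. */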
void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step)
{
	u32 val;

	val = qce_read(qce, REG_VERSION);
	*major = (val & CORE_MAJOR_REV_MASK) >> CORE_MAJOR_REV_SHIFT;
	*minor = (val & CORE_MINOR_REV_MASK) >> CORE_MINOR_REV_SHIFT;
	*step = (val & CORE_STEP_REV_MASK) >> CORE_STEP_REV_SHIFT;
}