// SPDX-License-Identifier: GPL-2.0-only
/*
 * SHA-512 routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2011-2012 International Business Machines Inc.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <linux/module.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"

static int nx_crypto_ctx_sha512_init(struct crypto_tfm *tfm)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
        int err;

        err = nx_crypto_ctx_sha_init(tfm);
        if (err)
                return err;

        nx_ctx_init(nx_ctx, HCOP_FC_SHA);

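        /* Point the context at the SHA-512 limits (sg list length, bytes
         * per operation) that the driver read from the device tree.
         */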
        nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA512];

        NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512);

        return 0;
}

static int nx_sha512_init(struct shash_desc *desc)
{
        struct sha512_state *sctx = shash_desc_ctx(desc);

        memset(sctx, 0, sizeof(*sctx));

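        /* The NX coprocessor consumes and produces the running digest as
         * big-endian 64-bit words, so the initial hash values are stored
         * byte-swapped up front and the state can be memcpy'd into the
         * CPB unchanged.
         */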
        sctx->state[0] = __cpu_to_be64(SHA512_H0);
        sctx->state[1] = __cpu_to_be64(SHA512_H1);
        sctx->state[2] = __cpu_to_be64(SHA512_H2);
        sctx->state[3] = __cpu_to_be64(SHA512_H3);
        sctx->state[4] = __cpu_to_be64(SHA512_H4);
        sctx->state[5] = __cpu_to_be64(SHA512_H5);
        sctx->state[6] = __cpu_to_be64(SHA512_H6);
        sctx->state[7] = __cpu_to_be64(SHA512_H7);
        sctx->count[0] = 0;

        return 0;
}

static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
                            unsigned int len)
{
        struct sha512_state *sctx = shash_desc_ctx(desc);
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
        struct nx_sg *out_sg;
        u64 to_process, leftover = 0, total;
        unsigned long irq_flags;
        int rc = 0;
        int data_len;
        u32 max_sg_len;
        u64 buf_len = (sctx->count[0] % SHA512_BLOCK_SIZE);

        spin_lock_irqsave(&nx_ctx->lock, irq_flags);

        /* 2 cases for total data len:
         *  1: < SHA512_BLOCK_SIZE: copy into state, return 0
         *  2: >= SHA512_BLOCK_SIZE: process X blocks, copy in leftover
         */
        total = (sctx->count[0] % SHA512_BLOCK_SIZE) + len;
        if (total < SHA512_BLOCK_SIZE) {
                memcpy(sctx->buf + buf_len, data, len);
                sctx->count[0] += len;
                goto out;
        }

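        /* Seed the CPB with the running digest and flag the operation as
         * an intermediate continuation: INTERMEDIATE tells the firmware
         * not to apply final padding, CONTINUATION that a partial digest
         * is being carried in.
         */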
        memcpy(csbcpb->cpb.sha512.message_digest, sctx->state, SHA512_DIGEST_SIZE);
        NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
        NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

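        /* Cap the sg list at the smallest of: the device's sg entry
         * limit, what the driver's sg buffer can hold, and the number of
         * entries needed to cover the per-operation byte limit (one page
         * per entry).
         */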
        max_sg_len = min_t(u64, nx_ctx->ap->sglen,
                           nx_driver.of.max_sg_len/sizeof(struct nx_sg));
        max_sg_len = min_t(u64, max_sg_len,
                           nx_ctx->ap->databytelen/NX_PAGE_SIZE);

        data_len = SHA512_DIGEST_SIZE;
        out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
                                  &data_len, max_sg_len);
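        /* (start - end) is deliberately negative: vio_pfo_op treats a
         * negative inlen/outlen as "this buffer is a scatter/gather list
         * of that many bytes" (see vio_h_cop_sync()).
         */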
        nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

        if (data_len != SHA512_DIGEST_SIZE) {
                rc = -EINVAL;
                goto out;
        }

        do {
                int used_sgs = 0;
                struct nx_sg *in_sg = nx_ctx->in_sg;

                if (buf_len) {
                        data_len = buf_len;
                        in_sg = nx_build_sg_list(in_sg,
                                                 (u8 *) sctx->buf,
                                                 &data_len, max_sg_len);

                        if (data_len != buf_len) {
                                rc = -EINVAL;
                                goto out;
                        }
                        used_sgs = in_sg - nx_ctx->in_sg;
                }

                /* to_process: SHA512_BLOCK_SIZE-aligned chunk to be
                 * processed in this iteration. It is limited by the sg
                 * list size minus the entries already used for leftover
                 * data (see above). Ideally we could allow
                 * NX_PAGE_SIZE * max_sg_len bytes, but the data may not
                 * be page-aligned, so one extra sg entry is reserved for
                 * that.
                 */
                to_process = min_t(u64, total,
                                   (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
                to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
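                /* Only whole blocks are sent to the chip; any sub-block
                 * tail stays in sctx->buf so nx_sha512_final() can pad it.
                 */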

                data_len = to_process - buf_len;
                in_sg = nx_build_sg_list(in_sg, (u8 *) data,
                                         &data_len, max_sg_len);

                nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);

                if (data_len != (to_process - buf_len)) {
                        rc = -EINVAL;
                        goto out;
                }

                to_process = data_len + buf_len;
                leftover = total - to_process;

                /*
                 * we've hit the nx chip previously and we're updating
                 * again, so copy over the partial digest.
                 */
                memcpy(csbcpb->cpb.sha512.input_partial_digest,
                       csbcpb->cpb.sha512.message_digest,
                       SHA512_DIGEST_SIZE);

                if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
                        rc = -EINVAL;
                        goto out;
                }

                rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
                if (rc)
                        goto out;

                atomic_inc(&(nx_ctx->stats->sha512_ops));

                total -= to_process;
                data += to_process - buf_len;
                buf_len = 0;

        } while (leftover >= SHA512_BLOCK_SIZE);

        /* copy the leftover back into the state struct */
        if (leftover)
                memcpy(sctx->buf, data, leftover);
        sctx->count[0] += len;
        memcpy(sctx->state, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
out:
        spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
        return rc;
}

static int nx_sha512_final(struct shash_desc *desc, u8 *out)
{
        struct sha512_state *sctx = shash_desc_ctx(desc);
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
        struct nx_sg *in_sg, *out_sg;
        u32 max_sg_len;
        u64 count0;
        unsigned long irq_flags;
        int rc = 0;
        int len;

        spin_lock_irqsave(&nx_ctx->lock, irq_flags);

        max_sg_len = min_t(u64, nx_ctx->ap->sglen,
                           nx_driver.of.max_sg_len/sizeof(struct nx_sg));
        max_sg_len = min_t(u64, max_sg_len,
                           nx_ctx->ap->databytelen/NX_PAGE_SIZE);

        /* final is represented by continuing the operation and indicating
         * that this is not an intermediate operation
         */
        if (sctx->count[0] >= SHA512_BLOCK_SIZE) {
                /* we've hit the nx chip previously, now we're finalizing,
                 * so copy over the partial digest */
                memcpy(csbcpb->cpb.sha512.input_partial_digest, sctx->state,
                       SHA512_DIGEST_SIZE);
                NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
                NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
        } else {
                NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
                NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
        }

        count0 = sctx->count[0] * 8;

        csbcpb->cpb.sha512.message_bit_length_lo = count0;
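        /* Only the low 64 bits of the 128-bit SHA-512 message length are
         * tracked, which is sufficient for inputs below 2^61 bytes.
         */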

        len = sctx->count[0] & (SHA512_BLOCK_SIZE - 1);
        in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, &len,
                                 max_sg_len);

        if (len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1))) {
                rc = -EINVAL;
                goto out;
        }

        len = SHA512_DIGEST_SIZE;
        out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
                                  max_sg_len);

        nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
        nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

        if (!nx_ctx->op.outlen) {
                rc = -EINVAL;
                goto out;
        }

        rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
        if (rc)
                goto out;

        atomic_inc(&(nx_ctx->stats->sha512_ops));
        atomic64_add(sctx->count[0], &(nx_ctx->stats->sha512_bytes));

        memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
out:
        spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
        return rc;
}

static int nx_sha512_export(struct shash_desc *desc, void *out)
{
        struct sha512_state *sctx = shash_desc_ctx(desc);

        memcpy(out, sctx, sizeof(*sctx));

        return 0;
}

static int nx_sha512_import(struct shash_desc *desc, const void *in)
{
        struct sha512_state *sctx = shash_desc_ctx(desc);

        memcpy(sctx, in, sizeof(*sctx));

        return 0;
}
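
/* export/import copy the raw sha512_state, so a partially hashed stream
 * can be checkpointed and resumed; .statesize below matches this layout.
 */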
struct shash_alg nx_shash_sha512_alg = {
        .digestsize = SHA512_DIGEST_SIZE,
        .init       = nx_sha512_init,
        .update     = nx_sha512_update,
        .final      = nx_sha512_final,
        .export     = nx_sha512_export,
        .import     = nx_sha512_import,
        .descsize   = sizeof(struct sha512_state),
        .statesize  = sizeof(struct sha512_state),
        .base       = {
                .cra_name        = "sha512",
                .cra_driver_name = "sha512-nx",
                .cra_priority    = 300,
                .cra_blocksize   = SHA512_BLOCK_SIZE,
                .cra_module      = THIS_MODULE,
                .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
                .cra_init        = nx_crypto_ctx_sha512_init,
                .cra_exit        = nx_crypto_ctx_exit,
        }
};
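
/*
 * Minimal usage sketch (illustrative only, not part of this driver): once
 * this alg is registered, a kernel caller reaches it through the generic
 * shash API, e.g.:
 *
 *      struct crypto_shash *tfm = crypto_alloc_shash("sha512", 0, 0);
 *      SHASH_DESC_ON_STACK(desc, tfm);
 *      u8 digest[SHA512_DIGEST_SIZE];
 *
 *      desc->tfm = tfm;
 *      crypto_shash_digest(desc, data, len, digest);
 *      crypto_free_shash(tfm);
 *
 * "sha512-nx" is selected over the generic C implementation because of
 * its higher cra_priority (300).
 */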