// SPDX-License-Identifier: GPL-2.0-only
/*
 * SHA-256 routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2011-2012 International Business Machines Inc.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <linux/module.h>
#include <asm/vio.h>
#include <asm/byteorder.h>

#include "nx_csbcpb.h"
#include "nx.h"

static int nx_crypto_ctx_sha256_init(struct crypto_tfm *tfm)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
	int err;

	err = nx_crypto_ctx_sha_init(tfm);
	if (err)
		return err;

	nx_ctx_init(nx_ctx, HCOP_FC_SHA);

	nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA256];

	NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256);

	return 0;
}

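/*
 * The NX unit reads and writes the running digest in big-endian byte
 * order, so sctx->state is kept big-endian for the life of the hash:
 * note the __cpu_to_be32() initializers below and the raw memcpy()s to
 * and from csbcpb->cpb.sha256.message_digest.
 */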
static int nx_sha256_init(struct shash_desc *desc)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);

	memset(sctx, 0, sizeof(*sctx));

	sctx->state[0] = __cpu_to_be32(SHA256_H0);
	sctx->state[1] = __cpu_to_be32(SHA256_H1);
	sctx->state[2] = __cpu_to_be32(SHA256_H2);
	sctx->state[3] = __cpu_to_be32(SHA256_H3);
	sctx->state[4] = __cpu_to_be32(SHA256_H4);
	sctx->state[5] = __cpu_to_be32(SHA256_H5);
	sctx->state[6] = __cpu_to_be32(SHA256_H6);
	sctx->state[7] = __cpu_to_be32(SHA256_H7);
	sctx->count = 0;

	return 0;
}

static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
			    unsigned int len)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
	struct nx_sg *out_sg;
	u64 to_process = 0, leftover, total;
	unsigned long irq_flags;
	int rc = 0;
	int data_len;
	u32 max_sg_len;
	u64 buf_len = (sctx->count % SHA256_BLOCK_SIZE);

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	/* 2 cases for total data len:
	 *  1: < SHA256_BLOCK_SIZE: copy into state, return 0
	 *  2: >= SHA256_BLOCK_SIZE: process X blocks, copy in leftover
	 */
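	/* e.g. 20 bytes already buffered + len == 30: total == 50 < 64,
	 * so case 1 below only buffers */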
	total = (sctx->count % SHA256_BLOCK_SIZE) + len;
	if (total < SHA256_BLOCK_SIZE) {
		memcpy(sctx->buf + buf_len, data, len);
		sctx->count += len;
		goto out;
	}

	memcpy(csbcpb->cpb.sha256.message_digest, sctx->state,
	       SHA256_DIGEST_SIZE);
	NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
	NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

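	/*
	 * Clamp the scatter/gather list length to the device-tree limit,
	 * this unit's sglen property, and what databytelen can cover.
	 */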
	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
			   nx_driver.of.max_sg_len/sizeof(struct nx_sg));
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	data_len = SHA256_DIGEST_SIZE;
	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
				  &data_len, max_sg_len);
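	/*
	 * Head minus tail yields a negative byte count on purpose: per the
	 * vio_pfo_op contract (asm/vio.h), a negative inlen/outlen tells
	 * the hypervisor the buffer is a scatter/gather list rather than
	 * raw data.  The same convention applies to op.inlen below.
	 */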
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	if (data_len != SHA256_DIGEST_SIZE) {
		rc = -EINVAL;
		goto out;
	}

	do {
		int used_sgs = 0;
		struct nx_sg *in_sg = nx_ctx->in_sg;

		if (buf_len) {
			data_len = buf_len;
			in_sg = nx_build_sg_list(in_sg,
						 (u8 *) sctx->buf,
						 &data_len,
						 max_sg_len);

			if (data_len != buf_len) {
				rc = -EINVAL;
				goto out;
			}
			used_sgs = in_sg - nx_ctx->in_sg;
		}

		/*
		 * to_process: SHA256_BLOCK_SIZE aligned chunk to be
		 * processed in this iteration. This value is restricted
		 * by sg list limits and number of sgs we already used
		 * for leftover data. (see above)
		 * In ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
		 * but because data may not be aligned, we need to account
		 * for that too.
		 */
		to_process = min_t(u64, total,
				   (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
		to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
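		/* e.g. max_sg_len == 64 with one sg already used for the
		 * buffered tail: cap at 62 pages, then round down to a
		 * whole number of 64-byte blocks */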

		data_len = to_process - buf_len;
		in_sg = nx_build_sg_list(in_sg, (u8 *) data,
					 &data_len, max_sg_len);

		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);

		to_process = data_len + buf_len;
		leftover = total - to_process;

		/*
		 * we've hit the nx chip previously and we're updating
		 * again, so copy over the partial digest.
		 */
		memcpy(csbcpb->cpb.sha256.input_partial_digest,
		       csbcpb->cpb.sha256.message_digest,
		       SHA256_DIGEST_SIZE);

		if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
			rc = -EINVAL;
			goto out;
		}

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
		if (rc)
			goto out;

		atomic_inc(&(nx_ctx->stats->sha256_ops));

		total -= to_process;
		data += to_process - buf_len;
		buf_len = 0;

	} while (leftover >= SHA256_BLOCK_SIZE);

	/* copy the leftover back into the state struct */
	if (leftover)
		memcpy(sctx->buf, data, leftover);

	sctx->count += len;
	memcpy(sctx->state, csbcpb->cpb.sha256.message_digest,
	       SHA256_DIGEST_SIZE);
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

static int nx_sha256_final(struct shash_desc *desc, u8 *out)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
	struct nx_sg *in_sg, *out_sg;
	unsigned long irq_flags;
	u32 max_sg_len;
	int rc = 0;
	int len;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
			   nx_driver.of.max_sg_len/sizeof(struct nx_sg));
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	/* final is represented by continuing the operation and indicating
	 * that this is not an intermediate operation
	 */
	if (sctx->count >= SHA256_BLOCK_SIZE) {
		/* we've hit the nx chip previously, now we're finalizing,
		 * so copy over the partial digest
		 */
		memcpy(csbcpb->cpb.sha256.input_partial_digest, sctx->state,
		       SHA256_DIGEST_SIZE);
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
	} else {
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
	}
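	/* (NX_FDM_INTERMEDIATE set means more data will follow;
	 * NX_FDM_CONTINUATION means input_partial_digest carries the
	 * running digest from an earlier pass) */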

	csbcpb->cpb.sha256.message_bit_length = (u64) (sctx->count * 8);

	len = sctx->count & (SHA256_BLOCK_SIZE - 1);
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) sctx->buf,
				 &len, max_sg_len);

	if (len != (sctx->count & (SHA256_BLOCK_SIZE - 1))) {
		rc = -EINVAL;
		goto out;
	}

	len = SHA256_DIGEST_SIZE;
	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len, max_sg_len);

	if (len != SHA256_DIGEST_SIZE) {
		rc = -EINVAL;
		goto out;
	}

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
	if (!nx_ctx->op.outlen) {
		rc = -EINVAL;
		goto out;
	}

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
	if (rc)
		goto out;

	atomic_inc(&(nx_ctx->stats->sha256_ops));

	atomic64_add(sctx->count, &(nx_ctx->stats->sha256_bytes));
	memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

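/*
 * export/import serialize the raw sha256_state so a partially-computed
 * hash can be saved and resumed later; .statesize below advertises
 * exactly that layout.
 */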
static int nx_sha256_export(struct shash_desc *desc, void *out)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);

	memcpy(out, sctx, sizeof(*sctx));

	return 0;
}

static int nx_sha256_import(struct shash_desc *desc, const void *in)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);

	memcpy(sctx, in, sizeof(*sctx));

	return 0;
}

struct shash_alg nx_shash_sha256_alg = {
	.digestsize = SHA256_DIGEST_SIZE,
	.init       = nx_sha256_init,
	.update     = nx_sha256_update,
	.final      = nx_sha256_final,
	.export     = nx_sha256_export,
	.import     = nx_sha256_import,
	.descsize   = sizeof(struct sha256_state),
	.statesize  = sizeof(struct sha256_state),
	.base       = {
		.cra_name        = "sha256",
		.cra_driver_name = "sha256-nx",
		.cra_priority    = 300,
		.cra_blocksize   = SHA256_BLOCK_SIZE,
		.cra_module      = THIS_MODULE,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_init        = nx_crypto_ctx_sha256_init,
		.cra_exit        = nx_crypto_ctx_exit,
	}
};
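
/*
 * Usage sketch (illustrative only, not part of the original file): the
 * driver core in nx.c is assumed to register this algorithm from its
 * probe path via crypto_register_shash(&nx_shash_sha256_alg); a kernel
 * consumer then reaches it through the generic shash API whenever
 * "sha256-nx" wins on cra_priority.  example_sha256() below is a
 * hypothetical helper showing that path; it is compiled out here.
 */
#if 0
static int example_sha256(const u8 *data, unsigned int len, u8 *digest)
{
	struct crypto_shash *tfm;
	int rc;

	/* picks the highest-priority "sha256" provider, e.g. sha256-nx */
	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		/* one-shot init + update + final */
		rc = crypto_shash_digest(desc, data, len, digest);
	}

	crypto_free_shash(tfm);
	return rc;
}
#endif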