// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * caam - Freescale FSL CAAM support for Public Key Cryptography
 *
 * Copyright 2016 Freescale Semiconductor, Inc.
 * Copyright 2018-2019 NXP
 *
 * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
 * all the desired key parameters, input and output pointers.
 */
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "error.h"
#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "caampkc.h"

#define DESC_RSA_PUB_LEN	(2 * CAAM_CMD_SZ + SIZEOF_RSA_PUB_PDB)
#define DESC_RSA_PRIV_F1_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F1_PDB)
#define DESC_RSA_PRIV_F2_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F2_PDB)
#define DESC_RSA_PRIV_F3_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F3_PDB)
#define CAAM_RSA_MAX_INPUT_SIZE	512 /* for a 4096-bit modulus */

/* buffer filled with zeros, used for padding */
static u8 *zero_buffer;

/*
 * variable used to avoid double free of resources in case
 * algorithm registration was unsuccessful
 */
static bool init_done;

struct caam_akcipher_alg {
	struct akcipher_alg akcipher;
	bool registered;
};

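/*
 * Unmap the request's source/destination scatterlists and, if one was used,
 * the HW scatter/gather (sec4_sg) table of a completed RSA job.
 */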
static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
			 struct akcipher_request *req)
{
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

	dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
	dma_unmap_sg(dev, req_ctx->fixup_src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

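/*
 * The rsa_*_unmap() helpers below undo the PDB DMA mappings created by the
 * corresponding set_rsa_*_pdb() routines, one per RSA key form.
 */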
static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
			  struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

/* RSA Job Completion handler */
static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
{
	struct akcipher_request *req = context;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct rsa_edesc *edesc;
	int ecode = 0;
	bool has_bklog;

	if (err)
		ecode = caam_jr_strstatus(dev, err);

	edesc = req_ctx->edesc;
	has_bklog = edesc->bklog;

	rsa_pub_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	/*
	 * If the backlog flag is not set, the request was completed directly
	 * by CAAM, not by the crypto engine.
	 */
	if (!has_bklog)
		akcipher_request_complete(req, ecode);
	else
		crypto_finalize_akcipher_request(jrp->engine, req, ecode);
}

static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err,
			    void *context)
{
	struct akcipher_request *req = context;
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct rsa_edesc *edesc;
	int ecode = 0;
	bool has_bklog;

	if (err)
		ecode = caam_jr_strstatus(dev, err);

	edesc = req_ctx->edesc;
	has_bklog = edesc->bklog;

	switch (key->priv_form) {
	case FORM1:
		rsa_priv_f1_unmap(dev, edesc, req);
		break;
	case FORM2:
		rsa_priv_f2_unmap(dev, edesc, req);
		break;
	case FORM3:
		rsa_priv_f3_unmap(dev, edesc, req);
	}

	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	/*
	 * If the backlog flag is not set, the request was completed directly
	 * by CAAM, not by the crypto engine.
	 */
	if (!has_bklog)
		akcipher_request_complete(req, ecode);
	else
		crypto_finalize_akcipher_request(jrp->engine, req, ecode);
}

/**
 * caam_rsa_count_leading_zeros - count the leading zero bytes, which need to
 *                                be stripped, in a given scatterlist
 *
 * @sgl   : scatterlist to count zeros from
 * @nbytes: number of zeros, in bytes, to strip
 * @flags : operation flags
 */
static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
					unsigned int nbytes,
					unsigned int flags)
{
	struct sg_mapping_iter miter;
	int lzeros, ents;
	unsigned int len;
	unsigned int tbytes = nbytes;
	const u8 *buff;

	ents = sg_nents_for_len(sgl, nbytes);
	if (ents < 0)
		return ents;

	sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags);

	lzeros = 0;
	len = 0;
	while (nbytes > 0) {
		/* do not strip more than given bytes */
		while (len && !*buff && lzeros < nbytes) {
			lzeros++;
			len--;
			buff++;
		}

		if (len && *buff)
			break;

		sg_miter_next(&miter);
		buff = miter.addr;
		len = miter.length;

		nbytes -= lzeros;
		lzeros = 0;
	}

	miter.consumed = lzeros;
	sg_miter_stop(&miter);
	nbytes -= lzeros;

	return tbytes - nbytes;
}

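/*
 * Allocate an extended descriptor and prepare the request I/O: strip leading
 * zeros from (or record the needed zero padding for) the input so it matches
 * the modulus size, DMA-map the scatterlists and, when required, build the
 * HW S/G table right after the HW job descriptor.
 */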
static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
					 size_t desclen)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_edesc *edesc;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	int src_nents, dst_nents;
	int mapped_src_nents, mapped_dst_nents;
	unsigned int diff_size = 0;
	int lzeros;

	if (req->src_len > key->n_sz) {
		/*
		 * strip leading zeros and
		 * return the number of zeros to skip
		 */
		lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len -
						      key->n_sz, sg_flags);
		if (lzeros < 0)
			return ERR_PTR(lzeros);

		req_ctx->fixup_src = scatterwalk_ffwd(req_ctx->src, req->src,
						      lzeros);
		req_ctx->fixup_src_len = req->src_len - lzeros;
	} else {
		/*
		 * input src is less than the key modulus n,
		 * so there will be zero padding
		 */
		diff_size = key->n_sz - req->src_len;
		req_ctx->fixup_src = req->src;
		req_ctx->fixup_src_len = req->src_len;
	}

	src_nents = sg_nents_for_len(req_ctx->fixup_src,
				     req_ctx->fixup_src_len);
	dst_nents = sg_nents_for_len(req->dst, req->dst_len);

	mapped_src_nents = dma_map_sg(dev, req_ctx->fixup_src, src_nents,
				      DMA_TO_DEVICE);
	if (unlikely(!mapped_src_nents)) {
		dev_err(dev, "unable to map source\n");
		return ERR_PTR(-ENOMEM);
	}
	mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
				      DMA_FROM_DEVICE);
	if (unlikely(!mapped_dst_nents)) {
		dev_err(dev, "unable to map destination\n");
		goto src_fail;
	}

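	/*
	 * An input S/G table is needed when a zero-padding entry (diff_size)
	 * must be prepended or when the source spans several entries;
	 * pad_sg_nents() rounds the total up as the CAAM engine requires.
	 */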
	if (!diff_size && mapped_src_nents == 1)
		sec4_sg_len = 0; /* no need for an input hw s/g table */
	else
		sec4_sg_len = mapped_src_nents + !!diff_size;
	sec4_sg_index = sec4_sg_len;

	if (mapped_dst_nents > 1)
		sec4_sg_len += pad_sg_nents(mapped_dst_nents);
	else
		sec4_sg_len = pad_sg_nents(sec4_sg_len);

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc, hw desc commands and link tables */
	edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc)
		goto dst_fail;

	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;
	if (diff_size)
		dma_to_sec4_sg_one(edesc->sec4_sg, ctx->padding_dma, diff_size,
				   0);

	if (sec4_sg_index)
		sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len,
				   edesc->sec4_sg + !!diff_size, 0);

	if (mapped_dst_nents > 1)
		sg_to_sec4_sg_last(req->dst, req->dst_len,
				   edesc->sec4_sg + sec4_sg_index, 0);

	/* Save nents for later use in Job Descriptor */
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;

	req_ctx->edesc = edesc;

	if (!sec4_sg_bytes)
		return edesc;

	edesc->mapped_src_nents = mapped_src_nents;
	edesc->mapped_dst_nents = mapped_dst_nents;

	edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		goto sec4_sg_fail;
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	print_hex_dump_debug("caampkc sec4_sg@" __stringify(__LINE__) ": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
			     edesc->sec4_sg_bytes, 1);

	return edesc;

sec4_sg_fail:
	kfree(edesc);
dst_fail:
	dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
src_fail:
	dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
	return ERR_PTR(-ENOMEM);
}

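/*
 * Crypto-engine callback, invoked when a previously backlogged request is
 * dequeued: submit the already-built job descriptor to the Job Ring.
 */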
static int akcipher_do_one_req(struct crypto_engine *engine, void *areq)
{
	struct akcipher_request *req = container_of(areq,
						    struct akcipher_request,
						    base);
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	u32 *desc = req_ctx->edesc->hw_desc;
	int ret;

	req_ctx->edesc->bklog = true;

	ret = caam_jr_enqueue(jrdev, desc, req_ctx->akcipher_op_done, req);

	if (ret != -EINPROGRESS) {
		rsa_pub_unmap(jrdev, req_ctx->edesc, req);
		rsa_io_unmap(jrdev, req_ctx->edesc, req);
		kfree(req_ctx->edesc);
	} else {
		ret = 0;
	}

	return ret;
}

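/*
 * Fill the RSA Encrypt Protocol Data Block: map n and e, then set up the
 * input (f) and output (g) pointers, using the HW S/G table where needed.
 */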
static int set_rsa_pub_pdb(struct akcipher_request *req,
			   struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map RSA modulus memory\n");
		return -ENOMEM;
	}

	pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->e_dma)) {
		dev_err(dev, "Unable to map RSA public exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		pdb->f_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->g_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
	pdb->f_len = req_ctx->fixup_src_len;

	return 0;
}

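/*
 * Fill the RSA Decrypt (private key form 1) PDB: modulus n and private
 * exponent d.
 */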
static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map modulus memory\n");
		return -ENOMEM;
	}

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;

	return 0;
}

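/*
 * Fill the RSA Decrypt (private key form 2) PDB: private exponent d, primes
 * p and q, plus two bidirectional temporary buffers for the CAAM engine.
 */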
static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		return -ENOMEM;
	}

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		goto unmap_d;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_q;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
unmap_d:
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

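/*
 * Fill the RSA Decrypt (private key form 3, CRT) PDB: p, q, dp, dq and the
 * coefficient qinv, plus two bidirectional temporary buffers.
 */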
static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		return -ENOMEM;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dp_dma)) {
		dev_err(dev, "Unable to map RSA exponent dp memory\n");
		goto unmap_q;
	}

	pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dq_dma)) {
		dev_err(dev, "Unable to map RSA exponent dq memory\n");
		goto unmap_dp;
	}

	pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->c_dma)) {
		dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
		goto unmap_dq;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_qinv;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_qinv:
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
unmap_dq:
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
unmap_dp:
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

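/*
 * Common submission path: backloggable requests go through crypto-engine,
 * all others directly to the Job Ring; on synchronous failure, undo the
 * PDB and I/O mappings for the key form that was used.
 */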
static int akcipher_enqueue_req(struct device *jrdev,
				void (*cbk)(struct device *jrdev, u32 *desc,
					    u32 err, void *context),
				struct akcipher_request *req)
{
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct rsa_edesc *edesc = req_ctx->edesc;
	u32 *desc = edesc->hw_desc;
	int ret;

	req_ctx->akcipher_op_done = cbk;
	/*
	 * Only backlog requests are sent to crypto-engine since the others
	 * can be handled by CAAM, if free, especially since JR has up to 1024
	 * entries (more than the 10 entries from crypto-engine).
	 */
	if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		ret = crypto_transfer_akcipher_request_to_engine(jrpriv->engine,
								 req);
	else
		ret = caam_jr_enqueue(jrdev, desc, cbk, req);

	if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
		switch (key->priv_form) {
		case FORM1:
			rsa_priv_f1_unmap(jrdev, edesc, req);
			break;
		case FORM2:
			rsa_priv_f2_unmap(jrdev, edesc, req);
			break;
		case FORM3:
			rsa_priv_f3_unmap(jrdev, edesc, req);
			break;
		default:
			rsa_pub_unmap(jrdev, edesc, req);
		}
		rsa_io_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

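/* RSA encryption - public key operation: g = f^e mod n */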
static int caam_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	if (unlikely(!key->n || !key->e))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(jrdev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Encrypt Protocol Data Block */
	ret = set_rsa_pub_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);

	return akcipher_enqueue_req(jrdev, rsa_pub_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

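/* RSA decryption, private key form 1: f = g^d mod n */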
static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */
	ret = set_rsa_priv_f1_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);

	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

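/* RSA decryption, private key form 2: uses d together with primes p and q */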
static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
	ret = set_rsa_priv_f2_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);

	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) struct device *jrdev = ctx->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) struct rsa_edesc *edesc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) /* Allocate extended descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) if (IS_ERR(edesc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) return PTR_ERR(edesc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) /* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) ret = set_rsa_priv_f3_pdb(req, edesc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) goto init_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) /* Initialize Job Descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) init_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) rsa_io_unmap(jrdev, edesc, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) kfree(edesc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) static int caam_rsa_dec(struct akcipher_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) struct caam_rsa_key *key = &ctx->key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) if (unlikely(!key->n || !key->d))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) if (req->dst_len < key->n_sz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) req->dst_len = key->n_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) dev_err(ctx->dev, "Output buffer length less than parameter n\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) return -EOVERFLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
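	/*
	 * Dispatch to the richest form that setkey managed to prepare:
	 * form 3 (CRT) when all of dP, dQ and qInv are available, form 2
	 * when only the prime factors are, and form 1 as the fallback
	 * that needs nothing beyond n and d.
	 */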
	if (key->priv_form == FORM3)
		ret = caam_rsa_dec_priv_f3(req);
	else if (key->priv_form == FORM2)
		ret = caam_rsa_dec_priv_f2(req);
	else
		ret = caam_rsa_dec_priv_f1(req);

	return ret;
}

static void caam_rsa_free_key(struct caam_rsa_key *key)
{
	kfree_sensitive(key->d);
	kfree_sensitive(key->p);
	kfree_sensitive(key->q);
	kfree_sensitive(key->dp);
	kfree_sensitive(key->dq);
	kfree_sensitive(key->qinv);
	kfree_sensitive(key->tmp1);
	kfree_sensitive(key->tmp2);
	kfree(key->e);
	kfree(key->n);
	memset(key, 0, sizeof(*key));
}

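/*
 * Advance *ptr past any leading zero bytes of a big-endian integer,
 * shrinking *nbytes accordingly: e.g. {0x00, 0x00, 0x12, 0x34} with
 * *nbytes == 4 ends with *ptr advanced by two and *nbytes == 2. This
 * yields the minimal positive representation the CAAM expects.
 */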
static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
{
	/* test *nbytes first so an all-zero buffer cannot read past its end */
	while (*nbytes && !**ptr) {
		(*ptr)++;
		(*nbytes)--;
	}
}

/**
 * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
 * dP, dQ and qInv may decode to fewer bytes than the corresponding p, q
 * prime factors, since BER encoding uses the minimum number of bytes to
 * represent an integer. The decoded dP, dQ and qInv values therefore
 * have to be zero-padded back to the appropriate length.
 *
 * @ptr   : pointer to {dP, dQ, qInv} CRT member
 * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
 * @dstlen: length in bytes of corresponding p or q prime factor
 */
static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
{
	u8 *dst;

	caam_rsa_drop_leading_zeros(&ptr, &nbytes);
	if (!nbytes)
		return NULL;

	dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL);
	if (!dst)
		return NULL;

	memcpy(dst + (dstlen - nbytes), ptr, nbytes);

	return dst;
}
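
/*
 * Example for caam_read_rsa_crt() above: with a 1024-bit prime
 * (dstlen == 128) and a dP that BER-decodes to 127 bytes, the value is
 * copied to dst + 1, so the single leading byte of the kzalloc'ed
 * buffer stays zero and the member is exactly as long as its prime.
 */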

/**
 * caam_read_raw_data - Read a raw byte stream as a positive integer.
 * The function skips the buffer's leading zeros, copies the remaining
 * data to a buffer allocated in the GFP_DMA | GFP_KERNEL zone and
 * returns the address of the new buffer.
 *
 * @buf   : The data to read
 * @nbytes: The amount of data to read
 */
static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
{
	caam_rsa_drop_leading_zeros(&buf, nbytes);
	if (!*nbytes)
		return NULL;

	return kmemdup(buf, *nbytes, GFP_DMA | GFP_KERNEL);
}
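
/*
 * Note that *nbytes is updated to the trimmed length, so callers can
 * store it directly as the key member size. The returned buffer is
 * owned by the caller and is released through caam_rsa_free_key().
 */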

static int caam_rsa_check_key_length(unsigned int len)
{
	if (len > 4096)
		return -EINVAL;
	return 0;
}
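
/*
 * Callers pass the modulus size in bits (n_sz << 3), so the 4096-bit
 * hardware limit above matches the 512-byte CAAM_RSA_MAX_INPUT_SIZE
 * used for buffer sizing: 512 * 8 = 4096.
 */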

static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
				unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_pub_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	return 0;
err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

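/*
 * Best-effort upgrade of the private key representation: any allocation
 * failure below simply unwinds and leaves the key usable as form 1,
 * which needs only the n and d that caam_rsa_set_priv_key() has already
 * installed. No error is reported to the caller for that reason.
 */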
static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
				       struct rsa_key *raw_key)
{
	struct caam_rsa_key *rsa_key = &ctx->key;
	size_t p_sz = raw_key->p_sz;
	size_t q_sz = raw_key->q_sz;

	rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
	if (!rsa_key->p)
		return;
	rsa_key->p_sz = p_sz;

	rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
	if (!rsa_key->q)
		goto free_p;
	rsa_key->q_sz = q_sz;

	rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp1)
		goto free_q;

	rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp2)
		goto free_tmp1;

	rsa_key->priv_form = FORM2;

	rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
	if (!rsa_key->dp)
		goto free_tmp2;

	rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
	if (!rsa_key->dq)
		goto free_dp;

	rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
					  q_sz);
	if (!rsa_key->qinv)
		goto free_dq;

	rsa_key->priv_form = FORM3;

	return;

	/*
	 * NULL each pointer after freeing it so that a later
	 * caam_rsa_free_key() cannot double-free, and drop back to form 1
	 * so decryption never dereferences the released buffers.
	 */
free_dq:
	kfree_sensitive(rsa_key->dq);
	rsa_key->dq = NULL;
free_dp:
	kfree_sensitive(rsa_key->dp);
	rsa_key->dp = NULL;
free_tmp2:
	kfree_sensitive(rsa_key->tmp2);
	rsa_key->tmp2 = NULL;
free_tmp1:
	kfree_sensitive(rsa_key->tmp1);
	rsa_key->tmp1 = NULL;
free_q:
	kfree_sensitive(rsa_key->q);
	rsa_key->q = NULL;
free_p:
	kfree_sensitive(rsa_key->p);
	rsa_key->p = NULL;
	rsa_key->priv_form = FORM1;
}

static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
				 unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_priv_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->d = kmemdup(raw_key.d, raw_key.d_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->d)
		goto err;

	rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->d_sz = raw_key.d_sz;
	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	caam_rsa_set_priv_key_form(ctx, &raw_key);

	return 0;

err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

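/*
 * Report the modulus length in bytes; this is also the size of the
 * ciphertext and of a zero-padded decryption result. It reads as 0
 * until a key has been set, since the transform context starts zeroed
 * and caam_rsa_free_key() clears the whole key on teardown.
 */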
static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	return ctx->key.n_sz;
}

/* Per session pkc's driver context creation function */
static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	ctx->dev = caam_jr_alloc();
	if (IS_ERR(ctx->dev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->dev);
	}

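	/*
	 * Map the global zero_buffer once per transform; it supplies the
	 * zeros used to left-pad inputs shorter than the modulus, which
	 * can require at most CAAM_RSA_MAX_INPUT_SIZE - 1 bytes.
	 */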
	ctx->padding_dma = dma_map_single(ctx->dev, zero_buffer,
					  CAAM_RSA_MAX_INPUT_SIZE - 1,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, ctx->padding_dma)) {
		dev_err(ctx->dev, "unable to map padding\n");
		caam_jr_free(ctx->dev);
		return -ENOMEM;
	}

	ctx->enginectx.op.do_one_request = akcipher_do_one_req;

	return 0;
}

/* Per session pkc's driver context cleanup function */
static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;

	dma_unmap_single(ctx->dev, ctx->padding_dma, CAAM_RSA_MAX_INPUT_SIZE -
			 1, DMA_TO_DEVICE);
	caam_rsa_free_key(key);
	caam_jr_free(ctx->dev);
}

static struct caam_akcipher_alg caam_rsa = {
	.akcipher = {
		.encrypt = caam_rsa_enc,
		.decrypt = caam_rsa_dec,
		.set_pub_key = caam_rsa_set_pub_key,
		.set_priv_key = caam_rsa_set_priv_key,
		.max_size = caam_rsa_max_size,
		.init = caam_rsa_init_tfm,
		.exit = caam_rsa_exit_tfm,
		.reqsize = sizeof(struct caam_rsa_req_ctx),
		.base = {
			.cra_name = "rsa",
			.cra_driver_name = "rsa-caam",
			.cra_priority = 3000,
			.cra_module = THIS_MODULE,
			.cra_ctxsize = sizeof(struct caam_rsa_ctx),
		},
	}
};

/* Public Key Cryptography module initialization handler */
int caam_pkc_init(struct device *ctrldev)
{
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	u32 pk_inst, pkha;
	int err;

	init_done = false;

	/* Determine public key hardware accelerator presence. */
	if (priv->era < 10) {
		pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
			   CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
	} else {
		pkha = rd_reg32(&priv->ctrl->vreg.pkha);
		pk_inst = pkha & CHA_VER_NUM_MASK;

		/*
		 * Newer CAAMs support partially disabled functionality. If
		 * this is the case, the number is non-zero, but this bit is
		 * set to indicate that no encryption or decryption is
		 * supported. Only signing and verifying are supported.
		 */
		if (pkha & CHA_VER_MISC_PKHA_NO_CRYPT)
			pk_inst = 0;
	}

	/* Do not register algorithms if PKHA is not present. */
	if (!pk_inst)
		return 0;

	/* allocate zero buffer, used for padding input */
	zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_DMA |
			      GFP_KERNEL);
	if (!zero_buffer)
		return -ENOMEM;

	err = crypto_register_akcipher(&caam_rsa.akcipher);
	if (err) {
		kfree(zero_buffer);
		dev_warn(ctrldev, "%s alg registration failed\n",
			 caam_rsa.akcipher.base.cra_driver_name);
	} else {
		init_done = true;
		caam_rsa.registered = true;
		dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");
	}

	return err;
}
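
/*
 * Teardown mirrors caam_pkc_init(): the algorithm is unregistered
 * before zero_buffer is freed, so no transform can still be mapping
 * it, and nothing is done at all if registration never completed
 * (init_done guards against releasing resources twice).
 */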
void caam_pkc_exit(void)
{
	if (!init_done)
		return;

	if (caam_rsa.registered)
		crypto_unregister_akcipher(&caam_rsa.akcipher);

	kfree(zero_buffer);
}