Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hash algorithms supported by the CESA: MD5, SHA1 and SHA256.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 */

#include <crypto/hmac.h>
#include <crypto/md5.h>
#include <crypto/sha.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

#include "cesa.h"

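/*
 * Iterator state used while building a TDMA chain for a hash request:
 * 'base' tracks overall progress, 'src' walks the source scatterlist.
 */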
struct mv_cesa_ahash_dma_iter {
	struct mv_cesa_dma_iter base;
	struct mv_cesa_sg_dma_iter src;
};

static inline void
mv_cesa_ahash_req_iter_init(struct mv_cesa_ahash_dma_iter *iter,
			    struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int len = req->nbytes + creq->cache_ptr;

	if (!creq->last_req)
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;

	mv_cesa_req_dma_iter_init(&iter->base, len);
	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
	iter->src.op_offset = creq->cache_ptr;
}

static inline bool
mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter)
{
	iter->src.op_offset = 0;

	return mv_cesa_req_dma_iter_next_op(&iter->base);
}

static inline int
mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_dma_req *req, gfp_t flags)
{
	req->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
				    &req->cache_dma);
	if (!req->cache)
		return -ENOMEM;

	return 0;
}

static inline void
mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_dma_req *req)
{
	if (!req->cache)
		return;

	dma_pool_free(cesa_dev->dma->cache_pool, req->cache,
		      req->cache_dma);
}

static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req,
					   gfp_t flags)
{
	if (req->padding)
		return 0;

	req->padding = dma_pool_alloc(cesa_dev->dma->padding_pool, flags,
				      &req->padding_dma);
	if (!req->padding)
		return -ENOMEM;

	return 0;
}

static void mv_cesa_ahash_dma_free_padding(struct mv_cesa_ahash_dma_req *req)
{
	if (!req->padding)
		return;

	dma_pool_free(cesa_dev->dma->padding_pool, req->padding,
		      req->padding_dma);
	req->padding = NULL;
}

static inline void mv_cesa_ahash_dma_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	mv_cesa_ahash_dma_free_padding(&creq->req.dma);
}

static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
	mv_cesa_ahash_dma_free_cache(&creq->req.dma);
	mv_cesa_dma_cleanup(&creq->base);
}

static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_cleanup(req);
}

static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_last_cleanup(req);
}

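/*
 * Number of padding bytes needed to reach 56 mod 64, as required by the
 * MD5/SHA1/SHA256 padding scheme (the 64-bit length field follows).
 */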
static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)
{
	unsigned int index, padlen;

	index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
	padlen = (index < 56) ? (56 - index) : (64 + 56 - index);

	return padlen;
}

static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
{
	unsigned int padlen;

	buf[0] = 0x80;
	/* Pad out to 56 mod 64 */
	padlen = mv_cesa_ahash_pad_len(creq);
	memset(buf + 1, 0, padlen - 1);

	if (creq->algo_le) {
		__le64 bits = cpu_to_le64(creq->len << 3);

		memcpy(buf + padlen, &bits, sizeof(bits));
	} else {
		__be64 bits = cpu_to_be64(creq->len << 3);

		memcpy(buf + padlen, &bits, sizeof(bits));
	}

	return padlen + 8;
}

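/*
 * Standard (non-DMA) step: copy the cached data plus as much new source
 * data as fits into the engine SRAM, patch the operation descriptor
 * (fragment mode, lengths, padding for the last request) and start the
 * accelerator.
 */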
static void mv_cesa_ahash_std_step(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = creq->base.engine;
	struct mv_cesa_op_ctx *op;
	unsigned int new_cache_ptr = 0;
	u32 frag_mode;
	size_t  len;
	unsigned int digsize;
	int i;

	mv_cesa_adjust_op(engine, &creq->op_tmpl);
	memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));

	if (!sreq->offset) {
		digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
		for (i = 0; i < digsize / 4; i++)
			writel_relaxed(creq->state[i],
				       engine->regs + CESA_IVDIG(i));
	}

	if (creq->cache_ptr)
		memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
			    creq->cache, creq->cache_ptr);

	len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
		    CESA_SA_SRAM_PAYLOAD_SIZE);

	if (!creq->last_req) {
		new_cache_ptr = len & CESA_HASH_BLOCK_SIZE_MSK;
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;
	}

	if (len - creq->cache_ptr)
		sreq->offset += sg_pcopy_to_buffer(req->src, creq->src_nents,
						   engine->sram +
						   CESA_SA_DATA_SRAM_OFFSET +
						   creq->cache_ptr,
						   len - creq->cache_ptr,
						   sreq->offset);

	op = &creq->op_tmpl;

	frag_mode = mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK;

	if (creq->last_req && sreq->offset == req->nbytes &&
	    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
		if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
			frag_mode = CESA_SA_DESC_CFG_NOT_FRAG;
		else if (frag_mode == CESA_SA_DESC_CFG_MID_FRAG)
			frag_mode = CESA_SA_DESC_CFG_LAST_FRAG;
	}

	if (frag_mode == CESA_SA_DESC_CFG_NOT_FRAG ||
	    frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) {
		if (len &&
		    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
			mv_cesa_set_mac_op_total_len(op, creq->len);
		} else {
			int trailerlen = mv_cesa_ahash_pad_len(creq) + 8;

			if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) {
				len &= CESA_HASH_BLOCK_SIZE_MSK;
				new_cache_ptr = 64 - trailerlen;
				memcpy_fromio(creq->cache,
					      engine->sram +
					      CESA_SA_DATA_SRAM_OFFSET + len,
					      new_cache_ptr);
			} else {
				i = mv_cesa_ahash_pad_req(creq, creq->cache);
				len += i;
				memcpy_toio(engine->sram + len +
					    CESA_SA_DATA_SRAM_OFFSET,
					    creq->cache, i);
			}

			if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG)
				frag_mode = CESA_SA_DESC_CFG_MID_FRAG;
			else
				frag_mode = CESA_SA_DESC_CFG_FIRST_FRAG;
		}
	}

	mv_cesa_set_mac_op_frag_len(op, len);
	mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK);

	/* FIXME: only update enc_len field */
	memcpy_toio(engine->sram, op, sizeof(*op));

	if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
		mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->cache_ptr = new_cache_ptr;

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	WARN_ON(readl(engine->regs + CESA_SA_CMD) &
		CESA_SA_CMD_EN_CESA_SA_ACCL0);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

	if (sreq->offset < (req->nbytes - creq->cache_ptr))
		return -EINPROGRESS;

	return 0;
}

static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_req *basereq = &creq->base;

	mv_cesa_dma_prepare(basereq, basereq->engine);
}

static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

	sreq->offset = 0;
}

static void mv_cesa_ahash_dma_step(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_req *base = &creq->base;

	/* We must explicitly set the digest state. */
	if (base->chain.first->flags & CESA_TDMA_SET_STATE) {
		struct mv_cesa_engine *engine = base->engine;
		int i;

		/* Set the hash state in the IVDIG regs. */
		for (i = 0; i < ARRAY_SIZE(creq->state); i++)
			writel_relaxed(creq->state[i], engine->regs +
				       CESA_IVDIG(i));
	}

	mv_cesa_dma_step(base);
}

static void mv_cesa_ahash_step(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_step(ahashreq);
	else
		mv_cesa_ahash_std_step(ahashreq);
}

static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		return mv_cesa_dma_process(&creq->base, status);

	return mv_cesa_ahash_std_process(ahashreq, status);
}

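/*
 * Completion handler: fetch the digest either from the TDMA result buffer
 * or from the IVDIG registers, keep it as the running state and, for the
 * last request, copy it to req->result with the right endianness (the
 * hardware produces MD5 in little endian and SHA in big endian).
 */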
static void mv_cesa_ahash_complete(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
	struct mv_cesa_engine *engine = creq->base.engine;
	unsigned int digsize;
	int i;

	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ &&
	    (creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) ==
	     CESA_TDMA_RESULT) {
		__le32 *data = NULL;

		/*
		 * Result is already in the correct endianness when the SA is
		 * used
		 */
		data = creq->base.chain.last->op->ctx.hash.hash;
		for (i = 0; i < digsize / 4; i++)
			creq->state[i] = le32_to_cpu(data[i]);

		memcpy(ahashreq->result, data, digsize);
	} else {
		for (i = 0; i < digsize / 4; i++)
			creq->state[i] = readl_relaxed(engine->regs +
						       CESA_IVDIG(i));
		if (creq->last_req) {
			/*
			 * Hardware's MD5 digest is in little endian format, but
			 * SHA in big endian format
			 */
			if (creq->algo_le) {
				__le32 *result = (void *)ahashreq->result;

				for (i = 0; i < digsize / 4; i++)
					result[i] = cpu_to_le32(creq->state[i]);
			} else {
				__be32 *result = (void *)ahashreq->result;

				for (i = 0; i < digsize / 4; i++)
					result[i] = cpu_to_be32(creq->state[i]);
			}
		}
	}

	atomic_sub(ahashreq->nbytes, &engine->load);
}

static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
				  struct mv_cesa_engine *engine)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	creq->base.engine = engine;

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_prepare(ahashreq);
	else
		mv_cesa_ahash_std_prepare(ahashreq);
}

static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (creq->last_req)
		mv_cesa_ahash_last_cleanup(ahashreq);

	mv_cesa_ahash_cleanup(ahashreq);

	if (creq->cache_ptr)
		sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
				   creq->cache,
				   creq->cache_ptr,
				   ahashreq->nbytes - creq->cache_ptr);
}

static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
	.step = mv_cesa_ahash_step,
	.process = mv_cesa_ahash_process,
	.cleanup = mv_cesa_ahash_req_cleanup,
	.complete = mv_cesa_ahash_complete,
};

static void mv_cesa_ahash_init(struct ahash_request *req,
			      struct mv_cesa_op_ctx *tmpl, bool algo_le)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	memset(creq, 0, sizeof(*creq));
	mv_cesa_update_op_cfg(tmpl,
			      CESA_SA_DESC_CFG_OP_MAC_ONLY |
			      CESA_SA_DESC_CFG_FIRST_FRAG,
			      CESA_SA_DESC_CFG_OP_MSK |
			      CESA_SA_DESC_CFG_FRAG_MSK);
	mv_cesa_set_mac_op_total_len(tmpl, 0);
	mv_cesa_set_mac_op_frag_len(tmpl, 0);
	creq->op_tmpl = *tmpl;
	creq->len = 0;
	creq->algo_le = algo_le;
}

static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));
	return 0;
}

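/*
 * Buffer the request data in the internal cache when it is too small to
 * fill a hash block and this is not the final request; the data will be
 * processed together with a later update.
 */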
static bool mv_cesa_ahash_cache_req(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	bool cached = false;

	if (creq->cache_ptr + req->nbytes < CESA_MAX_HASH_BLOCK_SIZE &&
	    !creq->last_req) {
		cached = true;

		if (!req->nbytes)
			return cached;

		sg_pcopy_to_buffer(req->src, creq->src_nents,
				   creq->cache + creq->cache_ptr,
				   req->nbytes, 0);

		creq->cache_ptr += req->nbytes;
	}

	return cached;
}

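/*
 * Append an operation descriptor for one fragment to the TDMA chain,
 * followed by a dummy descriptor that launches the engine. Once a first
 * fragment has been emitted, the template switches to mid-fragment mode
 * for the following ones.
 */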
static struct mv_cesa_op_ctx *
mv_cesa_dma_add_frag(struct mv_cesa_tdma_chain *chain,
		     struct mv_cesa_op_ctx *tmpl, unsigned int frag_len,
		     gfp_t flags)
{
	struct mv_cesa_op_ctx *op;
	int ret;

	op = mv_cesa_dma_add_op(chain, tmpl, false, flags);
	if (IS_ERR(op))
		return op;

	/* Set the operation block fragment length. */
	mv_cesa_set_mac_op_frag_len(op, frag_len);

	/* Append dummy desc to launch operation */
	ret = mv_cesa_dma_add_dummy_launch(chain, flags);
	if (ret)
		return ERR_PTR(ret);

	if (mv_cesa_mac_op_is_first_frag(tmpl))
		mv_cesa_update_op_cfg(tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	return op;
}

static int
mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain,
			    struct mv_cesa_ahash_req *creq,
			    gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	int ret;

	if (!creq->cache_ptr)
		return 0;

	ret = mv_cesa_ahash_dma_alloc_cache(ahashdreq, flags);
	if (ret)
		return ret;

	memcpy(ahashdreq->cache, creq->cache, creq->cache_ptr);

	return mv_cesa_dma_add_data_transfer(chain,
					     CESA_SA_DATA_SRAM_OFFSET,
					     ahashdreq->cache_dma,
					     creq->cache_ptr,
					     CESA_TDMA_DST_IN_SRAM,
					     flags);
}

static struct mv_cesa_op_ctx *
mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain,
			   struct mv_cesa_ahash_dma_iter *dma_iter,
			   struct mv_cesa_ahash_req *creq,
			   unsigned int frag_len, gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	unsigned int len, trailerlen, padoff = 0;
	struct mv_cesa_op_ctx *op;
	int ret;

	/*
	 * If the transfer is smaller than our maximum length, and we have
	 * some data outstanding, we can ask the engine to finish the hash.
	 */
	if (creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX && frag_len) {
		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len,
					  flags);
		if (IS_ERR(op))
			return op;

		mv_cesa_set_mac_op_total_len(op, creq->len);
		mv_cesa_update_op_cfg(op, mv_cesa_mac_op_is_first_frag(op) ?
						CESA_SA_DESC_CFG_NOT_FRAG :
						CESA_SA_DESC_CFG_LAST_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

		ret = mv_cesa_dma_add_result_op(chain,
						CESA_SA_CFG_SRAM_OFFSET,
						CESA_SA_DATA_SRAM_OFFSET,
						CESA_TDMA_SRC_IN_SRAM, flags);
		if (ret)
			return ERR_PTR(-ENOMEM);
		return op;
	}

	/*
	 * The request is longer than the engine can handle, or we have
	 * no data outstanding. Manually generate the padding, adding it
	 * as a "mid" fragment.
	 */
	ret = mv_cesa_ahash_dma_alloc_padding(ahashdreq, flags);
	if (ret)
		return ERR_PTR(ret);

	trailerlen = mv_cesa_ahash_pad_req(creq, ahashdreq->padding);

	len = min(CESA_SA_SRAM_PAYLOAD_SIZE - frag_len, trailerlen);
	if (len) {
		ret = mv_cesa_dma_add_data_transfer(chain,
						CESA_SA_DATA_SRAM_OFFSET +
						frag_len,
						ahashdreq->padding_dma,
						len, CESA_TDMA_DST_IN_SRAM,
						flags);
		if (ret)
			return ERR_PTR(ret);

		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len + len,
					  flags);
		if (IS_ERR(op))
			return op;

		if (len == trailerlen)
			return op;

		padoff += len;
	}

	ret = mv_cesa_dma_add_data_transfer(chain,
					    CESA_SA_DATA_SRAM_OFFSET,
					    ahashdreq->padding_dma +
					    padoff,
					    trailerlen - padoff,
					    CESA_TDMA_DST_IN_SRAM,
					    flags);
	if (ret)
		return ERR_PTR(ret);

	return mv_cesa_dma_add_frag(chain, &creq->op_tmpl, trailerlen - padoff,
				    flags);
}

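/*
 * Build the whole TDMA chain for a DMA-capable request: map the source
 * scatterlist, transfer the cached data first, then add one operation per
 * SRAM-sized chunk of new data and finally the closing operation (with
 * padding) when this is the last request.
 */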
static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	struct mv_cesa_req *basereq = &creq->base;
	struct mv_cesa_ahash_dma_iter iter;
	struct mv_cesa_op_ctx *op = NULL;
	unsigned int frag_len;
	bool set_state = false;
	int ret;
	u32 type;

	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	if (!mv_cesa_mac_op_is_first_frag(&creq->op_tmpl))
		set_state = true;

	if (creq->src_nents) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
		if (!ret) {
			ret = -ENOMEM;
			goto err;
		}
	}

	mv_cesa_tdma_desc_iter_init(&basereq->chain);
	mv_cesa_ahash_req_iter_init(&iter, req);

	/*
	 * Add the cache (left-over data from a previous block) first.
	 * This will never overflow the SRAM size.
	 */
	ret = mv_cesa_ahash_dma_add_cache(&basereq->chain, creq, flags);
	if (ret)
		goto err_free_tdma;

	if (iter.src.sg) {
		/*
		 * Add all the new data, inserting an operation block and
		 * launch command between each full SRAM block-worth of
		 * data. We intentionally do not add the final op block.
		 */
		while (true) {
			ret = mv_cesa_dma_add_op_transfers(&basereq->chain,
							   &iter.base,
							   &iter.src, flags);
			if (ret)
				goto err_free_tdma;

			frag_len = iter.base.op_len;

			if (!mv_cesa_ahash_req_iter_next_op(&iter))
				break;

			op = mv_cesa_dma_add_frag(&basereq->chain,
						  &creq->op_tmpl,
						  frag_len, flags);
			if (IS_ERR(op)) {
				ret = PTR_ERR(op);
				goto err_free_tdma;
			}
		}
	} else {
		/* Account for the data that was in the cache. */
		frag_len = iter.base.op_len;
	}

	/*
	 * At this point, frag_len indicates whether we have any data
	 * outstanding which needs an operation.  Queue up the final
	 * operation, which depends on whether this is the final request.
	 */
	if (creq->last_req)
		op = mv_cesa_ahash_dma_last_req(&basereq->chain, &iter, creq,
						frag_len, flags);
	else if (frag_len)
		op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
					  frag_len, flags);

	if (IS_ERR(op)) {
		ret = PTR_ERR(op);
		goto err_free_tdma;
	}

	/*
	 * If results are copied via DMA, this means that this
	 * request can be directly processed by the engine,
	 * without partial updates. So we can chain it at the
	 * DMA level with other requests.
	 */
	type = basereq->chain.last->flags & CESA_TDMA_TYPE_MSK;

	if (op && type != CESA_TDMA_RESULT) {
		/* Add dummy desc to wait for crypto operation end */
		ret = mv_cesa_dma_add_dummy_end(&basereq->chain, flags);
		if (ret)
			goto err_free_tdma;
	}

	if (!creq->last_req)
		creq->cache_ptr = req->nbytes + creq->cache_ptr -
				  iter.base.len;
	else
		creq->cache_ptr = 0;

	basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;

	if (type != CESA_TDMA_RESULT)
		basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN;

	if (set_state) {
		/*
		 * Put the CESA_TDMA_SET_STATE flag on the first tdma desc to
		 * let the step logic know that the IVDIG registers should be
		 * explicitly set before launching a TDMA chain.
		 */
		basereq->chain.first->flags |= CESA_TDMA_SET_STATE;
	}

	return 0;

err_free_tdma:
	mv_cesa_dma_cleanup(basereq);
	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);

err:
	mv_cesa_ahash_last_cleanup(req);

	return ret;
}

static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (creq->src_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of src SG");
		return creq->src_nents;
	}

	*cached = mv_cesa_ahash_cache_req(req);

	if (*cached)
		return 0;

	if (cesa_dev->caps->has_tdma)
		return mv_cesa_ahash_dma_req_init(req);
	else
		return 0;
}

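/*
 * Initialize the request, select an engine (load-balanced on the request
 * size) and queue it; requests fully absorbed by the cache return
 * immediately without touching the hardware.
 */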
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) static int mv_cesa_ahash_queue_req(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	struct mv_cesa_engine *engine;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	bool cached = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	ret = mv_cesa_ahash_req_init(req, &cached);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	if (cached)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	engine = mv_cesa_select_engine(req->nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	mv_cesa_ahash_prepare(&req->base, engine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	ret = mv_cesa_queue_req(&req->base, &creq->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	if (mv_cesa_req_needs_cleanup(&req->base, ret))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 		mv_cesa_ahash_cleanup(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 
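/* Accumulate the total message length and push the new data through the queue. */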
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) static int mv_cesa_ahash_update(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	creq->len += req->nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	return mv_cesa_ahash_queue_req(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 
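/*
 * final()/finup() program the total message length into the operation
 * template and mark the request as the last one, so the engine applies
 * padding and produces the final digest.
 */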
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) static int mv_cesa_ahash_final(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	creq->last_req = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	req->nbytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	return mv_cesa_ahash_queue_req(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) static int mv_cesa_ahash_finup(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	creq->len += req->nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	creq->last_req = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	return mv_cesa_ahash_queue_req(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 
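/*
 * Export the partial state in a hash-agnostic way: the current digest
 * words, the number of bytes hashed so far, and the not-yet-processed
 * bytes held in the context cache (zero-padded to a full block).
 */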
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) static int mv_cesa_ahash_export(struct ahash_request *req, void *hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 				u64 *len, void *cache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	unsigned int digsize = crypto_ahash_digestsize(ahash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	unsigned int blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	blocksize = crypto_ahash_blocksize(ahash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	*len = creq->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	memcpy(hash, creq->state, digsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	memset(cache, 0, blocksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	memcpy(cache, creq->cache, creq->cache_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 
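/*
 * Import a previously exported state. Once at least one full block has
 * been hashed, the operation is switched to a "middle fragment"; the
 * remainder of len modulo the block size (computed with do_div()) is
 * copied back into the context cache.
 */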
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 				u64 len, const void *cache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	unsigned int digsize = crypto_ahash_digestsize(ahash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	unsigned int blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	unsigned int cache_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	ret = crypto_ahash_init(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	blocksize = crypto_ahash_blocksize(ahash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	if (len >= blocksize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 		mv_cesa_update_op_cfg(&creq->op_tmpl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 				      CESA_SA_DESC_CFG_MID_FRAG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 				      CESA_SA_DESC_CFG_FRAG_MSK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	creq->len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	memcpy(creq->state, hash, digsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	creq->cache_ptr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	cache_ptr = do_div(len, blocksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	if (!cache_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	memcpy(creq->cache, cache, cache_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	creq->cache_ptr = cache_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 
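/*
 * MD5 init: select the MD5 MAC mode and seed the standard MD5 initial
 * values. The trailing 'true' tells the common init helper that this
 * algorithm uses a little-endian digest layout, unlike the SHA variants
 * below which pass 'false'.
 */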
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) static int mv_cesa_md5_init(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	struct mv_cesa_op_ctx tmpl = { };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	mv_cesa_ahash_init(req, &tmpl, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	creq->state[0] = MD5_H0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	creq->state[1] = MD5_H1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	creq->state[2] = MD5_H2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	creq->state[3] = MD5_H3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) static int mv_cesa_md5_export(struct ahash_request *req, void *out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	struct md5_state *out_state = out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	return mv_cesa_ahash_export(req, out_state->hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 				    &out_state->byte_count, out_state->block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) static int mv_cesa_md5_import(struct ahash_request *req, const void *in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	const struct md5_state *in_state = in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	return mv_cesa_ahash_import(req, in_state->hash, in_state->byte_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 				    in_state->block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) static int mv_cesa_md5_digest(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	ret = mv_cesa_md5_init(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	return mv_cesa_ahash_finup(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) struct ahash_alg mv_md5_alg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	.init = mv_cesa_md5_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	.update = mv_cesa_ahash_update,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	.final = mv_cesa_ahash_final,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	.finup = mv_cesa_ahash_finup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	.digest = mv_cesa_md5_digest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	.export = mv_cesa_md5_export,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	.import = mv_cesa_md5_import,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	.halg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 		.digestsize = MD5_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		.statesize = sizeof(struct md5_state),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 		.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 			.cra_name = "md5",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 			.cra_driver_name = "mv-md5",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 			.cra_priority = 300,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 			.cra_flags = CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 				     CRYPTO_ALG_ALLOCATES_MEMORY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 				     CRYPTO_ALG_KERN_DRIVER_ONLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 			.cra_init = mv_cesa_ahash_cra_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 			.cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) static int mv_cesa_sha1_init(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	struct mv_cesa_op_ctx tmpl = { };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	mv_cesa_ahash_init(req, &tmpl, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	creq->state[0] = SHA1_H0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	creq->state[1] = SHA1_H1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	creq->state[2] = SHA1_H2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	creq->state[3] = SHA1_H3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	creq->state[4] = SHA1_H4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) static int mv_cesa_sha1_export(struct ahash_request *req, void *out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	struct sha1_state *out_state = out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 				    out_state->buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) static int mv_cesa_sha1_import(struct ahash_request *req, const void *in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	const struct sha1_state *in_state = in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 				    in_state->buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) static int mv_cesa_sha1_digest(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	ret = mv_cesa_sha1_init(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	return mv_cesa_ahash_finup(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) struct ahash_alg mv_sha1_alg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	.init = mv_cesa_sha1_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	.update = mv_cesa_ahash_update,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	.final = mv_cesa_ahash_final,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	.finup = mv_cesa_ahash_finup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	.digest = mv_cesa_sha1_digest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	.export = mv_cesa_sha1_export,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	.import = mv_cesa_sha1_import,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	.halg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 		.digestsize = SHA1_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 		.statesize = sizeof(struct sha1_state),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 		.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 			.cra_name = "sha1",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 			.cra_driver_name = "mv-sha1",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 			.cra_priority = 300,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 			.cra_flags = CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 				     CRYPTO_ALG_ALLOCATES_MEMORY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 				     CRYPTO_ALG_KERN_DRIVER_ONLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 			.cra_blocksize = SHA1_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 			.cra_init = mv_cesa_ahash_cra_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 			.cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) static int mv_cesa_sha256_init(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	struct mv_cesa_op_ctx tmpl = { };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	mv_cesa_ahash_init(req, &tmpl, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	creq->state[0] = SHA256_H0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	creq->state[1] = SHA256_H1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	creq->state[2] = SHA256_H2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	creq->state[3] = SHA256_H3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	creq->state[4] = SHA256_H4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	creq->state[5] = SHA256_H5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	creq->state[6] = SHA256_H6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	creq->state[7] = SHA256_H7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) static int mv_cesa_sha256_digest(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	ret = mv_cesa_sha256_init(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	return mv_cesa_ahash_finup(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) static int mv_cesa_sha256_export(struct ahash_request *req, void *out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	struct sha256_state *out_state = out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 				    out_state->buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) static int mv_cesa_sha256_import(struct ahash_request *req, const void *in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	const struct sha256_state *in_state = in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 				    in_state->buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) struct ahash_alg mv_sha256_alg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	.init = mv_cesa_sha256_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	.update = mv_cesa_ahash_update,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	.final = mv_cesa_ahash_final,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	.finup = mv_cesa_ahash_finup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	.digest = mv_cesa_sha256_digest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	.export = mv_cesa_sha256_export,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	.import = mv_cesa_sha256_import,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	.halg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 		.digestsize = SHA256_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 		.statesize = sizeof(struct sha256_state),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 		.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 			.cra_name = "sha256",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 			.cra_driver_name = "mv-sha256",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 			.cra_priority = 300,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 			.cra_flags = CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 				     CRYPTO_ALG_ALLOCATES_MEMORY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 				     CRYPTO_ALG_KERN_DRIVER_ONLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 			.cra_blocksize = SHA256_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 			.cra_init = mv_cesa_ahash_cra_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 			.cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 
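/*
 * Minimal completion context used to run the fallback ahash
 * synchronously while precomputing the HMAC inner/outer states.
 */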
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) struct mv_cesa_ahash_result {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	struct completion completion;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) static void mv_cesa_hmac_ahash_complete(struct crypto_async_request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 					int error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	struct mv_cesa_ahash_result *result = req->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	if (error == -EINPROGRESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	result->error = error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	complete(&result->completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 
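/*
 * Hash a single padded block (ipad or opad) through the underlying
 * ahash and export the resulting intermediate state; this state is what
 * gets loaded as the hardware IV for the HMAC operation. The request
 * may complete asynchronously and is waited for with an interruptible
 * wait whose return value is not checked here.
 */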
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) static int mv_cesa_ahmac_iv_state_init(struct ahash_request *req, u8 *pad,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 				       void *state, unsigned int blocksize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	struct mv_cesa_ahash_result result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	struct scatterlist sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 				   mv_cesa_hmac_ahash_complete, &result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	sg_init_one(&sg, pad, blocksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	ahash_request_set_crypt(req, &sg, pad, blocksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	init_completion(&result.completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	ret = crypto_ahash_init(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	ret = crypto_ahash_update(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	if (ret && ret != -EINPROGRESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	wait_for_completion_interruptible(&result.completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	if (result.error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 		return result.error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	ret = crypto_ahash_export(req, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 
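/*
 * Standard HMAC (RFC 2104) key preparation: keys longer than a block
 * are replaced by their digest, the key is zero-padded to a full block,
 * and ipad/opad are derived by XORing every byte with 0x36/0x5c.
 */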
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) static int mv_cesa_ahmac_pad_init(struct ahash_request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 				  const u8 *key, unsigned int keylen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 				  u8 *ipad, u8 *opad,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 				  unsigned int blocksize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	struct mv_cesa_ahash_result result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	struct scatterlist sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	if (keylen <= blocksize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 		memcpy(ipad, key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 		u8 *keydup = kmemdup(key, keylen, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 		if (!keydup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 					   mv_cesa_hmac_ahash_complete,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 					   &result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 		sg_init_one(&sg, keydup, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 		ahash_request_set_crypt(req, &sg, ipad, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 		init_completion(&result.completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 		ret = crypto_ahash_digest(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 		if (ret == -EINPROGRESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 			wait_for_completion_interruptible(&result.completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 			ret = result.error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 		/* Zero the key copy before freeing it so no key material leaks. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 		kfree_sensitive(keydup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 		keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	memset(ipad + keylen, 0, blocksize - keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	memcpy(opad, ipad, blocksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	for (i = 0; i < blocksize; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 		ipad[i] ^= HMAC_IPAD_VALUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 		opad[i] ^= HMAC_OPAD_VALUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 
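/*
 * Derive the HMAC inner and outer partial states for a given key by
 * allocating the matching driver ahash (e.g. "mv-md5"), building the
 * ipad/opad blocks and exporting the state obtained after hashing each
 * of them. The per-algorithm setkey callers store those states as the
 * operation IV.
 */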
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) static int mv_cesa_ahmac_setkey(const char *hash_alg_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 				const u8 *key, unsigned int keylen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 				void *istate, void *ostate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	struct ahash_request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	struct crypto_ahash *tfm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	unsigned int blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	u8 *ipad = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	u8 *opad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	tfm = crypto_alloc_ahash(hash_alg_name, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	if (IS_ERR(tfm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 		return PTR_ERR(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	req = ahash_request_alloc(tfm, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	if (!req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 		goto free_ahash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	crypto_ahash_clear_flags(tfm, ~0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	ipad = kcalloc(2, blocksize, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	if (!ipad) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 		goto free_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	opad = ipad + blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	ret = mv_cesa_ahmac_pad_init(req, key, keylen, ipad, opad, blocksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 		goto free_ipad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	ret = mv_cesa_ahmac_iv_state_init(req, ipad, istate, blocksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 		goto free_ipad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	ret = mv_cesa_ahmac_iv_state_init(req, opad, ostate, blocksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) free_ipad:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	kfree(ipad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) free_req:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	ahash_request_free(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) free_ahash:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	crypto_free_ahash(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) static int mv_cesa_ahmac_cra_init(struct crypto_tfm *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	ctx->base.ops = &mv_cesa_ahash_req_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 				 sizeof(struct mv_cesa_ahash_req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) static int mv_cesa_ahmac_md5_init(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	struct mv_cesa_op_ctx tmpl = { };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_MD5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	mv_cesa_ahash_init(req, &tmpl, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 
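/*
 * The inner state is stored in the first IV words and the outer state
 * at a fixed offset of 8 32-bit words; the SHA1 and SHA256 setkey
 * helpers below use the same layout.
 */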
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) static int mv_cesa_ahmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 				    unsigned int keylen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	struct md5_state istate, ostate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	int ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	ret = mv_cesa_ahmac_setkey("mv-md5", key, keylen, &istate, &ostate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	for (i = 0; i < ARRAY_SIZE(istate.hash); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 		ctx->iv[i] = cpu_to_be32(istate.hash[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	for (i = 0; i < ARRAY_SIZE(ostate.hash); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 		ctx->iv[i + 8] = cpu_to_be32(ostate.hash[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) static int mv_cesa_ahmac_md5_digest(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	ret = mv_cesa_ahmac_md5_init(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	return mv_cesa_ahash_finup(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) struct ahash_alg mv_ahmac_md5_alg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	.init = mv_cesa_ahmac_md5_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	.update = mv_cesa_ahash_update,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	.final = mv_cesa_ahash_final,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	.finup = mv_cesa_ahash_finup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	.digest = mv_cesa_ahmac_md5_digest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	.setkey = mv_cesa_ahmac_md5_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	.export = mv_cesa_md5_export,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	.import = mv_cesa_md5_import,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	.halg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 		.digestsize = MD5_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 		.statesize = sizeof(struct md5_state),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 		.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 			.cra_name = "hmac(md5)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 			.cra_driver_name = "mv-hmac-md5",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 			.cra_priority = 300,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 			.cra_flags = CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 				     CRYPTO_ALG_ALLOCATES_MEMORY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 				     CRYPTO_ALG_KERN_DRIVER_ONLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 			.cra_init = mv_cesa_ahmac_cra_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 			.cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) static int mv_cesa_ahmac_sha1_init(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	struct mv_cesa_op_ctx tmpl = { };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	mv_cesa_ahash_init(req, &tmpl, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) static int mv_cesa_ahmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 				     unsigned int keylen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	struct sha1_state istate, ostate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 	int ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	ret = mv_cesa_ahmac_setkey("mv-sha1", key, keylen, &istate, &ostate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 		ctx->iv[i] = cpu_to_be32(istate.state[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 		ctx->iv[i + 8] = cpu_to_be32(ostate.state[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) static int mv_cesa_ahmac_sha1_digest(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	ret = mv_cesa_ahmac_sha1_init(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	return mv_cesa_ahash_finup(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) struct ahash_alg mv_ahmac_sha1_alg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	.init = mv_cesa_ahmac_sha1_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	.update = mv_cesa_ahash_update,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	.final = mv_cesa_ahash_final,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	.finup = mv_cesa_ahash_finup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	.digest = mv_cesa_ahmac_sha1_digest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	.setkey = mv_cesa_ahmac_sha1_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	.export = mv_cesa_sha1_export,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	.import = mv_cesa_sha1_import,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	.halg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 		.digestsize = SHA1_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 		.statesize = sizeof(struct sha1_state),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 		.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 			.cra_name = "hmac(sha1)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 			.cra_driver_name = "mv-hmac-sha1",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 			.cra_priority = 300,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 			.cra_flags = CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 				     CRYPTO_ALG_ALLOCATES_MEMORY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 				     CRYPTO_ALG_KERN_DRIVER_ONLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 			.cra_blocksize = SHA1_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 			.cra_init = mv_cesa_ahmac_cra_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 			.cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) static int mv_cesa_ahmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 				       unsigned int keylen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 	struct sha256_state istate, ostate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	int ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	ret = mv_cesa_ahmac_setkey("mv-sha256", key, keylen, &istate, &ostate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 		ctx->iv[i] = cpu_to_be32(istate.state[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 		ctx->iv[i + 8] = cpu_to_be32(ostate.state[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) static int mv_cesa_ahmac_sha256_init(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	struct mv_cesa_op_ctx tmpl = { };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA256);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 	mv_cesa_ahash_init(req, &tmpl, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) static int mv_cesa_ahmac_sha256_digest(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 	ret = mv_cesa_ahmac_sha256_init(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	return mv_cesa_ahash_finup(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) struct ahash_alg mv_ahmac_sha256_alg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	.init = mv_cesa_ahmac_sha256_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	.update = mv_cesa_ahash_update,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	.final = mv_cesa_ahash_final,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	.finup = mv_cesa_ahash_finup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	.digest = mv_cesa_ahmac_sha256_digest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	.setkey = mv_cesa_ahmac_sha256_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	.export = mv_cesa_sha256_export,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 	.import = mv_cesa_sha256_import,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	.halg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 		.digestsize = SHA256_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 		.statesize = sizeof(struct sha256_state),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 		.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 			.cra_name = "hmac(sha256)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 			.cra_driver_name = "mv-hmac-sha256",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 			.cra_priority = 300,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 			.cra_flags = CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 				     CRYPTO_ALG_ALLOCATES_MEMORY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 				     CRYPTO_ALG_KERN_DRIVER_ONLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 			.cra_blocksize = SHA256_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 			.cra_init = mv_cesa_ahmac_cra_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 			.cra_module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) };