Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * Copyright 2016 Broadcom
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6) #include <linux/err.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/scatterlist.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/crypto.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/kthread.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/rtnetlink.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include <linux/of_address.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include <linux/of_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include <linux/bitops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include <crypto/algapi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include <crypto/aead.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #include <crypto/internal/aead.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #include <crypto/aes.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) #include <crypto/internal/des.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) #include <crypto/hmac.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) #include <crypto/sha.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) #include <crypto/md5.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) #include <crypto/authenc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) #include <crypto/skcipher.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) #include <crypto/hash.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) #include <crypto/sha3.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) #include "util.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) #include "cipher.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) #include "spu.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) #include "spum.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) #include "spu2.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42) /* ================= Device Structure ================== */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) 
/*
 * Driver-wide private state (mailbox channels, counters, SPU hw ops).
 * Single global instance; this driver supports one SPU device.
 */
struct bcm_device_private iproc_priv;

/* ==================== Parameters ===================== */

/* Nonzero enables verbose flow tracing via flow_log() (see util.h) */
int flow_debug_logging;
module_param(flow_debug_logging, int, 0644);
MODULE_PARM_DESC(flow_debug_logging, "Enable Flow Debug Logging");

/* Nonzero enables dumping of SPU message contents via packet_log() */
int packet_debug_logging;
module_param(packet_debug_logging, int, 0644);
MODULE_PARM_DESC(packet_debug_logging, "Enable Packet Debug Logging");

/* Delay (jiffies?) inserted between packet log dumps — TODO confirm units */
int debug_logging_sleep;
module_param(debug_logging_sleep, int, 0644);
MODULE_PARM_DESC(debug_logging_sleep, "Packet Debug Logging Sleep");

/*
 * The value of these module parameters is used to set the priority for each
 * algo type when this driver registers algos with the kernel crypto API.
 * To use a priority other than the default, set the priority in the insmod or
 * modprobe. Changing the module priority after init time has no effect.
 *
 * The default priorities are chosen to be lower (less preferred) than ARMv8 CE
 * algos, but more preferred than generic software algos.
 */
static int cipher_pri = 150;
module_param(cipher_pri, int, 0644);
MODULE_PARM_DESC(cipher_pri, "Priority for cipher algos");

static int hash_pri = 100;
module_param(hash_pri, int, 0644);
MODULE_PARM_DESC(hash_pri, "Priority for hash algos");

static int aead_pri = 150;
module_param(aead_pri, int, 0644);
MODULE_PARM_DESC(aead_pri, "Priority for AEAD algos");

/* A type 3 BCM header, expected to precede the SPU header for SPU-M.
 * Bits 3 and 4 in the first byte encode the channel number (the dma ringset).
 * 0x60 - ring 0
 * 0x68 - ring 1
 * 0x70 - ring 2
 * 0x78 - ring 3
 */
static char BCMHEADER[] = { 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x28 };
/*
 * Some SPU hw does not use BCM header on SPU messages. So BCM_HDR_LEN
 * is set dynamically after reading SPU type from device tree.
 */
#define BCM_HDR_LEN  iproc_priv.bcm_hdr_len

/* min and max time to sleep before retrying when mbox queue is full. usec */
#define MBOX_SLEEP_MIN  800
#define MBOX_SLEEP_MAX 1000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100)  * select_channel() - Select a SPU channel to handle a crypto request. Selects
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101)  * channel in round robin order.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103)  * Return:  channel index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) static u8 select_channel(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) 	u8 chan_idx = atomic_inc_return(&iproc_priv.next_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) 	return chan_idx % iproc_priv.spu.num_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113)  * spu_skcipher_rx_sg_create() - Build up the scatterlist of buffers used to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114)  * receive a SPU response message for an skcipher request. Includes buffers to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115)  * catch SPU message headers and the response data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116)  * @mssg:	mailbox message containing the receive sg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117)  * @rctx:	crypto request context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118)  * @rx_frag_num: number of scatterlist elements required to hold the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119)  *		SPU response message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120)  * @chunksize:	Number of bytes of response data expected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121)  * @stat_pad_len: Number of bytes required to pad the STAT field to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122)  *		a 4-byte boundary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124)  * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125)  * when the request completes, whether the request is handled successfully or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126)  * there is an error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128)  * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129)  *   0 if successful
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130)  *   < 0 if an error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  131)  */
static int
spu_skcipher_rx_sg_create(struct brcm_message *mssg,
			    struct iproc_reqctx_s *rctx,
			    u8 rx_frag_num,
			    unsigned int chunksize, u32 stat_pad_len)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct scatterlist *sg;	/* used to build sgs in mbox message */
	struct iproc_ctx_s *ctx = rctx->ctx;
	u32 datalen;		/* Number of bytes of response data expected */

	/*
	 * Allocated here but freed in spu_chunk_cleanup(); the error paths
	 * below intentionally do not free it.
	 */
	mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
				rctx->gfp);
	if (!mssg->spu.dst)
		return -ENOMEM;

	sg = mssg->spu.dst;
	sg_init_table(sg, rx_frag_num);
	/* Space for SPU message header */
	sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);

	/* If XTS tweak in payload, add buffer to receive encrypted tweak */
	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
	    spu->spu_xts_tweak_in_payload())
		sg_set_buf(sg++, rctx->msg_buf.c.supdt_tweak,
			   SPU_XTS_TWEAK_SIZE);

	/* Copy in each dst sg entry from request, up to chunksize */
	datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip,
				 rctx->dst_nents, chunksize);
	if (datalen < chunksize) {
		/* dst sg ran out before covering chunksize response bytes */
		pr_err("%s(): failed to copy dst sg to mbox msg. chunksize %u, datalen %u",
		       __func__, chunksize, datalen);
		return -EFAULT;
	}

	/* Padding so the STAT field ends on a 4-byte boundary */
	if (stat_pad_len)
		sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);

	/* Zeroed buffer to catch the SPU status word (last sg entry) */
	memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
	sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178)  * spu_skcipher_tx_sg_create() - Build up the scatterlist of buffers used to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179)  * send a SPU request message for an skcipher request. Includes SPU message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180)  * headers and the request data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181)  * @mssg:	mailbox message containing the transmit sg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182)  * @rctx:	crypto request context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183)  * @tx_frag_num: number of scatterlist elements required to construct the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184)  *		SPU request message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185)  * @chunksize:	Number of bytes of request data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186)  * @pad_len:	Number of pad bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188)  * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189)  * when the request completes, whether the request is handled successfully or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190)  * there is an error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192)  * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193)  *   0 if successful
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194)  *   < 0 if an error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195)  */
static int
spu_skcipher_tx_sg_create(struct brcm_message *mssg,
			    struct iproc_reqctx_s *rctx,
			    u8 tx_frag_num, unsigned int chunksize, u32 pad_len)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct scatterlist *sg;	/* used to build sgs in mbox message */
	struct iproc_ctx_s *ctx = rctx->ctx;
	u32 datalen;		/* Number of bytes of request data copied */
	u32 stat_len;

	/*
	 * Allocated here but freed in spu_chunk_cleanup(); the error paths
	 * below intentionally do not free it.
	 */
	mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
				rctx->gfp);
	if (unlikely(!mssg->spu.src))
		return -ENOMEM;

	sg = mssg->spu.src;
	sg_init_table(sg, tx_frag_num);

	/* BCM header (if any) followed by the SPU request header */
	sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
		   BCM_HDR_LEN + ctx->spu_req_hdr_len);

	/* if XTS tweak in payload, copy from IV (where crypto API puts it) */
	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
	    spu->spu_xts_tweak_in_payload())
		sg_set_buf(sg++, rctx->msg_buf.iv_ctr, SPU_XTS_TWEAK_SIZE);

	/* Copy in each src sg entry from request, up to chunksize */
	datalen = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
				 rctx->src_nents, chunksize);
	if (unlikely(datalen < chunksize)) {
		/* src sg ran out before covering chunksize request bytes */
		pr_err("%s(): failed to copy src sg to mbox msg",
		       __func__);
		return -EFAULT;
	}

	/* Trailing pad bytes, if the SPU message format requires them */
	if (pad_len)
		sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);

	/* Some SPU hw expects a (zeroed) status field at the end of the msg */
	stat_len = spu->spu_tx_status_len();
	if (stat_len) {
		memset(rctx->msg_buf.tx_stat, 0, stat_len);
		sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
	}
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) static int mailbox_send_message(struct brcm_message *mssg, u32 flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) 				u8 chan_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) 	int retry_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) 	struct device *dev = &(iproc_priv.pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) 	err = mbox_send_message(iproc_priv.mbox[chan_idx], mssg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251) 	if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) 		while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254) 			 * Mailbox queue is full. Since MAY_SLEEP is set, assume
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) 			 * not in atomic context and we can wait and try again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257) 			retry_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) 			usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) 			err = mbox_send_message(iproc_priv.mbox[chan_idx],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) 						mssg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) 			atomic_inc(&iproc_priv.mb_no_spc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  263) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  264) 	if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  265) 		atomic_inc(&iproc_priv.mb_send_fail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  266) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  267) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269) 	/* Check error returned by mailbox controller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270) 	err = mssg->error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  271) 	if (unlikely(err < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272) 		dev_err(dev, "message error %d", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  273) 		/* Signal txdone for mailbox channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  274) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276) 	/* Signal txdone for mailbox channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  277) 	mbox_client_txdone(iproc_priv.mbox[chan_idx], err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  278) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  281) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282)  * handle_skcipher_req() - Submit as much of a block cipher request as fits in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283)  * a single SPU request message, starting at the current position in the request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284)  * data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285)  * @rctx:	Crypto request context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287)  * This may be called on the crypto API thread, or, when a request is so large
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288)  * it must be broken into multiple SPU messages, on the thread used to invoke
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289)  * the response callback. When requests are broken into multiple SPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290)  * messages, we assume subsequent messages depend on previous results, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291)  * thus always wait for previous results before submitting the next message.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292)  * Because requests are submitted in lock step like this, there is no need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  293)  * to synchronize access to request data structures.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  294)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  295)  * Return: -EINPROGRESS: request has been accepted and result will be returned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  296)  *			 asynchronously
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  297)  *         Any other value indicates an error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  298)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299) static int handle_skcipher_req(struct iproc_reqctx_s *rctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  300) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  301) 	struct spu_hw *spu = &iproc_priv.spu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  302) 	struct crypto_async_request *areq = rctx->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  303) 	struct skcipher_request *req =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  304) 	    container_of(areq, struct skcipher_request, base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  305) 	struct iproc_ctx_s *ctx = rctx->ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  306) 	struct spu_cipher_parms cipher_parms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  307) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  308) 	unsigned int chunksize;	/* Num bytes of request to submit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  309) 	int remaining;	/* Bytes of request still to process */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  310) 	int chunk_start;	/* Beginning of data for current SPU msg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  312) 	/* IV or ctr value to use in this SPU msg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  313) 	u8 local_iv_ctr[MAX_IV_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  314) 	u32 stat_pad_len;	/* num bytes to align status field */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  315) 	u32 pad_len;		/* total length of all padding */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  316) 	struct brcm_message *mssg;	/* mailbox message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318) 	/* number of entries in src and dst sg in mailbox message. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319) 	u8 rx_frag_num = 2;	/* response header and STATUS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  320) 	u8 tx_frag_num = 1;	/* request header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  322) 	flow_log("%s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  324) 	cipher_parms.alg = ctx->cipher.alg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  325) 	cipher_parms.mode = ctx->cipher.mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  326) 	cipher_parms.type = ctx->cipher_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  327) 	cipher_parms.key_len = ctx->enckeylen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  328) 	cipher_parms.key_buf = ctx->enckey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  329) 	cipher_parms.iv_buf = local_iv_ctr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  330) 	cipher_parms.iv_len = rctx->iv_ctr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  332) 	mssg = &rctx->mb_mssg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333) 	chunk_start = rctx->src_sent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  334) 	remaining = rctx->total_todo - chunk_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336) 	/* determine the chunk we are breaking off and update the indexes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337) 	if ((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) 	    (remaining > ctx->max_payload))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339) 		chunksize = ctx->max_payload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341) 		chunksize = remaining;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  343) 	rctx->src_sent += chunksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344) 	rctx->total_sent = rctx->src_sent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) 	/* Count number of sg entries to be included in this request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347) 	rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348) 	rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350) 	if ((ctx->cipher.mode == CIPHER_MODE_CBC) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) 	    rctx->is_encrypt && chunk_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  353) 		 * Encrypting non-first first chunk. Copy last block of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354) 		 * previous result to IV for this chunk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  356) 		sg_copy_part_to_buf(req->dst, rctx->msg_buf.iv_ctr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357) 				    rctx->iv_ctr_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  358) 				    chunk_start - rctx->iv_ctr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  360) 	if (rctx->iv_ctr_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  361) 		/* get our local copy of the iv */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  362) 		__builtin_memcpy(local_iv_ctr, rctx->msg_buf.iv_ctr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  363) 				 rctx->iv_ctr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  365) 		/* generate the next IV if possible */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  366) 		if ((ctx->cipher.mode == CIPHER_MODE_CBC) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367) 		    !rctx->is_encrypt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369) 			 * CBC Decrypt: next IV is the last ciphertext block in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370) 			 * this chunk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372) 			sg_copy_part_to_buf(req->src, rctx->msg_buf.iv_ctr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373) 					    rctx->iv_ctr_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) 					    rctx->src_sent - rctx->iv_ctr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) 		} else if (ctx->cipher.mode == CIPHER_MODE_CTR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) 			 * The SPU hardware increments the counter once for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) 			 * each AES block of 16 bytes. So update the counter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) 			 * for the next chunk, if there is one. Note that for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) 			 * this chunk, the counter has already been copied to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) 			 * local_iv_ctr. We can assume a block size of 16,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) 			 * because we only support CTR mode for AES, not for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) 			 * any other cipher alg.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) 			add_to_ctr(rctx->msg_buf.iv_ctr, chunksize >> 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) 	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) 		flow_log("max_payload infinite\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) 		flow_log("max_payload %u\n", ctx->max_payload);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) 	flow_log("sent:%u start:%u remains:%u size:%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) 		 rctx->src_sent, chunk_start, remaining, chunksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) 	/* Copy SPU header template created at setkey time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) 	memcpy(rctx->msg_buf.bcm_spu_req_hdr, ctx->bcm_spu_req_hdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) 	       sizeof(rctx->msg_buf.bcm_spu_req_hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) 	spu->spu_cipher_req_finish(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) 				   ctx->spu_req_hdr_len, !(rctx->is_encrypt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) 				   &cipher_parms, chunksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) 	atomic64_add(chunksize, &iproc_priv.bytes_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) 	stat_pad_len = spu->spu_wordalign_padlen(chunksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 	if (stat_pad_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) 		rx_frag_num++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) 	pad_len = stat_pad_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) 	if (pad_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) 		tx_frag_num++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 		spu->spu_request_pad(rctx->msg_buf.spu_req_pad, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) 				     0, ctx->auth.alg, ctx->auth.mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 				     rctx->total_sent, stat_pad_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) 	spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) 			      ctx->spu_req_hdr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) 	packet_log("payload:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) 	dump_sg(rctx->src_sg, rctx->src_skip, chunksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) 	packet_dump("   pad: ", rctx->msg_buf.spu_req_pad, pad_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) 	 * Build mailbox message containing SPU request msg and rx buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) 	 * to catch response message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 	memset(mssg, 0, sizeof(*mssg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 	mssg->type = BRCM_MESSAGE_SPU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 	mssg->ctx = rctx;	/* Will be returned in response */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 	/* Create rx scatterlist to catch result */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 	rx_frag_num += rctx->dst_nents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 	    spu->spu_xts_tweak_in_payload())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 		rx_frag_num++;	/* extra sg to insert tweak */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 	err = spu_skcipher_rx_sg_create(mssg, rctx, rx_frag_num, chunksize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 					  stat_pad_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 	/* Create tx scatterlist containing SPU request message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 	tx_frag_num += rctx->src_nents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 	if (spu->spu_tx_status_len())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 		tx_frag_num++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 	    spu->spu_xts_tweak_in_payload())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 		tx_frag_num++;	/* extra sg to insert tweak */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 	err = spu_skcipher_tx_sg_create(mssg, rctx, tx_frag_num, chunksize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 					  pad_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 	if (unlikely(err < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 	return -EINPROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466)  * handle_skcipher_resp() - Process a block cipher SPU response. Updates the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467)  * total received count for the request and updates global stats.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468)  * @rctx:	Crypto request context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) static void handle_skcipher_resp(struct iproc_reqctx_s *rctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 	struct spu_hw *spu = &iproc_priv.spu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) #ifdef DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 	struct crypto_async_request *areq = rctx->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 	struct skcipher_request *req = skcipher_request_cast(areq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 	struct iproc_ctx_s *ctx = rctx->ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 	u32 payload_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 	/* See how much data was returned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 	payload_len = spu->spu_payload_length(rctx->msg_buf.spu_resp_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 	 * In XTS mode, the first SPU_XTS_TWEAK_SIZE bytes may be the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 	 * encrypted tweak ("i") value; we don't count those.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 	    spu->spu_xts_tweak_in_payload() &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 	    (payload_len >= SPU_XTS_TWEAK_SIZE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 		payload_len -= SPU_XTS_TWEAK_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 	atomic64_add(payload_len, &iproc_priv.bytes_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 	flow_log("%s() offset: %u, bd_len: %u BD:\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 		 __func__, rctx->total_received, payload_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 	dump_sg(req->dst, rctx->total_received, payload_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 	rctx->total_received += payload_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 	if (rctx->total_received == rctx->total_todo) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 		atomic_inc(&iproc_priv.op_counts[SPU_OP_CIPHER]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 		atomic_inc(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 		   &iproc_priv.cipher_cnt[ctx->cipher.alg][ctx->cipher.mode]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508)  * spu_ahash_rx_sg_create() - Build up the scatterlist of buffers used to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509)  * receive a SPU response message for an ahash request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510)  * @mssg:	mailbox message containing the receive sg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511)  * @rctx:	crypto request context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512)  * @rx_frag_num: number of scatterlist elements required to hold the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513)  *		SPU response message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514)  * @digestsize: length of hash digest, in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515)  * @stat_pad_len: Number of bytes required to pad the STAT field to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516)  *		a 4-byte boundary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518)  * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519)  * when the request completes, whether the request is handled successfully or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520)  * there is an error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522)  * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523)  *   0 if successful
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524)  *   < 0 if an error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) spu_ahash_rx_sg_create(struct brcm_message *mssg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 		       struct iproc_reqctx_s *rctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 		       u8 rx_frag_num, unsigned int digestsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 		       u32 stat_pad_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 	struct spu_hw *spu = &iproc_priv.spu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 	struct scatterlist *sg;	/* used to build sgs in mbox message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 	struct iproc_ctx_s *ctx = rctx->ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 	mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 				rctx->gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 	if (!mssg->spu.dst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 	sg = mssg->spu.dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 	sg_init_table(sg, rx_frag_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 	/* Space for SPU message header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 	sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 	/* Space for digest */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 	sg_set_buf(sg++, rctx->msg_buf.digest, digestsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 	if (stat_pad_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 		sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 	memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 	sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558)  * spu_ahash_tx_sg_create() -  Build up the scatterlist of buffers used to send
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559)  * a SPU request message for an ahash request. Includes SPU message headers and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560)  * the request data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561)  * @mssg:	mailbox message containing the transmit sg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562)  * @rctx:	crypto request context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563)  * @tx_frag_num: number of scatterlist elements required to construct the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564)  *		SPU request message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565)  * @spu_hdr_len: length in bytes of SPU message header
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566)  * @hash_carry_len: Number of bytes of data carried over from previous req
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567)  * @new_data_len: Number of bytes of new request data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568)  * @pad_len:	Number of pad bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570)  * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571)  * when the request completes, whether the request is handled successfully or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572)  * there is an error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574)  * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575)  *   0 if successful
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576)  *   < 0 if an error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) spu_ahash_tx_sg_create(struct brcm_message *mssg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 		       struct iproc_reqctx_s *rctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 		       u8 tx_frag_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 		       u32 spu_hdr_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 		       unsigned int hash_carry_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 		       unsigned int new_data_len, u32 pad_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 	struct spu_hw *spu = &iproc_priv.spu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 	struct scatterlist *sg;	/* used to build sgs in mbox message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 	u32 datalen;		/* Number of bytes of response data expected */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 	u32 stat_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 	mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 				rctx->gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 	if (!mssg->spu.src)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 	sg = mssg->spu.src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 	sg_init_table(sg, tx_frag_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 	sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 		   BCM_HDR_LEN + spu_hdr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 	if (hash_carry_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 		sg_set_buf(sg++, rctx->hash_carry, hash_carry_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 	if (new_data_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 		/* Copy in each src sg entry from request, up to chunksize */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 		datalen = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 					 rctx->src_nents, new_data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 		if (datalen < new_data_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 			pr_err("%s(): failed to copy src sg to mbox msg",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 			       __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 	if (pad_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 		sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 	stat_len = spu->spu_tx_status_len();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 	if (stat_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 		memset(rctx->msg_buf.tx_stat, 0, stat_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 		sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629)  * handle_ahash_req() - Process an asynchronous hash request from the crypto
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630)  * API.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631)  * @rctx:  Crypto request context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633)  * Builds a SPU request message embedded in a mailbox message and submits the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634)  * mailbox message on a selected mailbox channel. The SPU request message is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635)  * constructed as a scatterlist, including entries from the crypto API's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636)  * src scatterlist to avoid copying the data to be hashed. This function is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637)  * called either on the thread from the crypto API, or, in the case that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638)  * crypto API request is too large to fit in a single SPU request message,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639)  * on the thread that invokes the receive callback with a response message.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640)  * Because some operations require the response from one chunk before the next
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641)  * chunk can be submitted, we always wait for the response for the previous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642)  * chunk before submitting the next chunk. Because requests are submitted in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643)  * lock step like this, there is no need to synchronize access to request data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644)  * structures.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646)  * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647)  *   -EINPROGRESS: request has been submitted to SPU and response will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648)  *		   returned asynchronously
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649)  *   -EAGAIN:      non-final request included a small amount of data, which for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650)  *		   efficiency we did not submit to the SPU, but instead stored
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651)  *		   to be submitted to the SPU with the next part of the request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652)  *   other:        an error code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) static int handle_ahash_req(struct iproc_reqctx_s *rctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 	struct spu_hw *spu = &iproc_priv.spu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 	struct crypto_async_request *areq = rctx->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 	struct ahash_request *req = ahash_request_cast(areq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 	struct crypto_tfm *tfm = crypto_ahash_tfm(ahash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 	unsigned int blocksize = crypto_tfm_alg_blocksize(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 	struct iproc_ctx_s *ctx = rctx->ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 	/* number of bytes still to be hashed in this req */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 	unsigned int nbytes_to_hash = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 	unsigned int chunksize = 0;	/* length of hash carry + new data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 	 * length of new data, not from hash carry, to be submitted in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 	 * this hw request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 	unsigned int new_data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 	unsigned int __maybe_unused chunk_start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 	u32 db_size;	 /* Length of data field, incl gcm and hash padding */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 	int pad_len = 0; /* total pad len, including gcm, hash, stat padding */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 	u32 data_pad_len = 0;	/* length of GCM/CCM padding */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 	u32 stat_pad_len = 0;	/* length of padding to align STATUS word */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 	struct brcm_message *mssg;	/* mailbox message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 	struct spu_request_opts req_opts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 	struct spu_cipher_parms cipher_parms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 	struct spu_hash_parms hash_parms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 	struct spu_aead_parms aead_parms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 	unsigned int local_nbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 	u32 spu_hdr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 	unsigned int digestsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 	u16 rem = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 	 * number of entries in src and dst sg. Always includes SPU msg header.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 	 * rx always includes a buffer to catch digest and STATUS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 	u8 rx_frag_num = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 	u8 tx_frag_num = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 	flow_log("total_todo %u, total_sent %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 		 rctx->total_todo, rctx->total_sent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	memset(&req_opts, 0, sizeof(req_opts));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 	memset(&cipher_parms, 0, sizeof(cipher_parms));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 	memset(&hash_parms, 0, sizeof(hash_parms));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	memset(&aead_parms, 0, sizeof(aead_parms));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 	req_opts.bd_suppress = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 	hash_parms.alg = ctx->auth.alg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 	hash_parms.mode = ctx->auth.mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 	hash_parms.type = HASH_TYPE_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 	hash_parms.key_buf = (u8 *)ctx->authkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 	hash_parms.key_len = ctx->authkeylen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 	 * For hash algorithms below assignment looks bit odd but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 	 * it's needed for AES-XCBC and AES-CMAC hash algorithms
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 	 * to differentiate between 128, 192, 256 bit key values.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 	 * Based on the key values, hash algorithm is selected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 	 * For example for 128 bit key, hash algorithm is AES-128.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	cipher_parms.type = ctx->cipher_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 	mssg = &rctx->mb_mssg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 	chunk_start = rctx->src_sent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 	 * Compute the amount remaining to hash. This may include data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	 * carried over from previous requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 	nbytes_to_hash = rctx->total_todo - rctx->total_sent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	chunksize = nbytes_to_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	if ((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	    (chunksize > ctx->max_payload))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 		chunksize = ctx->max_payload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	 * If this is not a final request and the request data is not a multiple
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	 * of a full block, then simply park the extra data and prefix it to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	 * data for the next request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 	if (!rctx->is_final) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 		u8 *dest = rctx->hash_carry + rctx->hash_carry_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 		u16 new_len;  /* len of data to add to hash carry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 		rem = chunksize % blocksize;   /* remainder */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 		if (rem) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 			/* chunksize not a multiple of blocksize */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 			chunksize -= rem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 			if (chunksize == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 				/* Don't have a full block to submit to hw */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 				new_len = rem - rctx->hash_carry_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 				sg_copy_part_to_buf(req->src, dest, new_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 						    rctx->src_sent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 				rctx->hash_carry_len = rem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 				flow_log("Exiting with hash carry len: %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 					 rctx->hash_carry_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 				packet_dump("  buf: ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 					    rctx->hash_carry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 					    rctx->hash_carry_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 				return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	/* if we have hash carry, then prefix it to the data in this request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	local_nbuf = rctx->hash_carry_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	rctx->hash_carry_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	if (local_nbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 		tx_frag_num++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	new_data_len = chunksize - local_nbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	/* Count number of sg entries to be used in this request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 				       new_data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	/* AES hashing keeps key size in type field, so need to copy it here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	if (hash_parms.alg == HASH_ALG_AES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 		hash_parms.type = (enum hash_type)cipher_parms.type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 		hash_parms.type = spu->spu_hash_type(rctx->total_sent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	digestsize = spu->spu_digest_size(ctx->digestsize, ctx->auth.alg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 					  hash_parms.type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	hash_parms.digestsize =	digestsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	/* update the indexes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	rctx->total_sent += chunksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	/* if you sent a prebuf then that wasn't from this req->src */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	rctx->src_sent += new_data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	if ((rctx->total_sent == rctx->total_todo) && rctx->is_final)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 		hash_parms.pad_len = spu->spu_hash_pad_len(hash_parms.alg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 							   hash_parms.mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 							   chunksize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 							   blocksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	 * If a non-first chunk, then include the digest returned from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	 * previous chunk so that hw can add to it (except for AES types).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	if ((hash_parms.type == HASH_TYPE_UPDT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	    (hash_parms.alg != HASH_ALG_AES)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 		hash_parms.key_buf = rctx->incr_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 		hash_parms.key_len = digestsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	atomic64_add(chunksize, &iproc_priv.bytes_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	flow_log("%s() final: %u nbuf: %u ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		 __func__, rctx->is_final, local_nbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 		flow_log("max_payload infinite\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 		flow_log("max_payload %u\n", ctx->max_payload);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	flow_log("chunk_start: %u chunk_size: %u\n", chunk_start, chunksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	/* Prepend SPU header with type 3 BCM header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	hash_parms.prebuf_len = local_nbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 					      BCM_HDR_LEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 					      &req_opts, &cipher_parms,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 					      &hash_parms, &aead_parms,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 					      new_data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	if (spu_hdr_len == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 		pr_err("Failed to create SPU request header\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	 * Determine total length of padding required. Put all padding in one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	 * buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	data_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode, chunksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	db_size = spu_real_db_size(0, 0, local_nbuf, new_data_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 				   0, 0, hash_parms.pad_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	if (spu->spu_tx_status_len())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 		stat_pad_len = spu->spu_wordalign_padlen(db_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	if (stat_pad_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 		rx_frag_num++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	pad_len = hash_parms.pad_len + data_pad_len + stat_pad_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	if (pad_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 		tx_frag_num++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 		spu->spu_request_pad(rctx->msg_buf.spu_req_pad, data_pad_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 				     hash_parms.pad_len, ctx->auth.alg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 				     ctx->auth.mode, rctx->total_sent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 				     stat_pad_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 			      spu_hdr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	packet_dump("    prebuf: ", rctx->hash_carry, local_nbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	flow_log("Data:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	dump_sg(rctx->src_sg, rctx->src_skip, new_data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	packet_dump("   pad: ", rctx->msg_buf.spu_req_pad, pad_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	 * Build mailbox message containing SPU request msg and rx buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	 * to catch response message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	memset(mssg, 0, sizeof(*mssg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	mssg->type = BRCM_MESSAGE_SPU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	mssg->ctx = rctx;	/* Will be returned in response */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	/* Create rx scatterlist to catch result */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	err = spu_ahash_rx_sg_create(mssg, rctx, rx_frag_num, digestsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 				     stat_pad_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	/* Create tx scatterlist containing SPU request message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	tx_frag_num += rctx->src_nents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	if (spu->spu_tx_status_len())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 		tx_frag_num++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	err = spu_ahash_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 				     local_nbuf, new_data_len, pad_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	if (unlikely(err < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	return -EINPROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889)  * spu_hmac_outer_hash() - Request synchonous software compute of the outer hash
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890)  * for an HMAC request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891)  * @req:  The HMAC request from the crypto API
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892)  * @ctx:  The session context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894)  * Return: 0 if synchronous hash operation successful
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895)  *         -EINVAL if the hash algo is unrecognized
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896)  *         any other value indicates an error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) static int spu_hmac_outer_hash(struct ahash_request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 			       struct iproc_ctx_s *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	unsigned int blocksize =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 		crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	switch (ctx->auth.alg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	case HASH_ALG_MD5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 		rc = do_shash("md5", req->result, ctx->opad, blocksize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 			      req->result, ctx->digestsize, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	case HASH_ALG_SHA1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 		rc = do_shash("sha1", req->result, ctx->opad, blocksize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 			      req->result, ctx->digestsize, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	case HASH_ALG_SHA224:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		rc = do_shash("sha224", req->result, ctx->opad, blocksize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 			      req->result, ctx->digestsize, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	case HASH_ALG_SHA256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		rc = do_shash("sha256", req->result, ctx->opad, blocksize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 			      req->result, ctx->digestsize, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	case HASH_ALG_SHA384:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 		rc = do_shash("sha384", req->result, ctx->opad, blocksize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 			      req->result, ctx->digestsize, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	case HASH_ALG_SHA512:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 		rc = do_shash("sha512", req->result, ctx->opad, blocksize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 			      req->result, ctx->digestsize, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 		pr_err("%s() Error : unknown hmac type\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 		rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939)  * ahash_req_done() - Process a hash result from the SPU hardware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940)  * @rctx: Crypto request context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942)  * Return: 0 if successful
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943)  *         < 0 if an error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) static int ahash_req_done(struct iproc_reqctx_s *rctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	struct spu_hw *spu = &iproc_priv.spu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	struct crypto_async_request *areq = rctx->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	struct ahash_request *req = ahash_request_cast(areq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	struct iproc_ctx_s *ctx = rctx->ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	memcpy(req->result, rctx->msg_buf.digest, ctx->digestsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	if (spu->spu_type == SPU_TYPE_SPUM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 		/* byte swap the output from the UPDT function to network byte
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 		 * order
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 		if (ctx->auth.alg == HASH_ALG_MD5) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 			__swab32s((u32 *)req->result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 			__swab32s(((u32 *)req->result) + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 			__swab32s(((u32 *)req->result) + 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 			__swab32s(((u32 *)req->result) + 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 			__swab32s(((u32 *)req->result) + 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	flow_dump("  digest ", req->result, ctx->digestsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	/* if this an HMAC then do the outer hash */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	if (rctx->is_sw_hmac) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		err = spu_hmac_outer_hash(req, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 		if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 		flow_dump("  hmac: ", req->result, ctx->digestsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	if (rctx->is_sw_hmac || ctx->auth.mode == HASH_MODE_HMAC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		atomic_inc(&iproc_priv.op_counts[SPU_OP_HMAC]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 		atomic_inc(&iproc_priv.hmac_cnt[ctx->auth.alg]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 		atomic_inc(&iproc_priv.op_counts[SPU_OP_HASH]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		atomic_inc(&iproc_priv.hash_cnt[ctx->auth.alg]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990)  * handle_ahash_resp() - Process a SPU response message for a hash request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991)  * Checks if the entire crypto API request has been processed, and if so,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992)  * invokes post processing on the result.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993)  * @rctx: Crypto request context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) static void handle_ahash_resp(struct iproc_reqctx_s *rctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	struct iproc_ctx_s *ctx = rctx->ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) #ifdef DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	struct crypto_async_request *areq = rctx->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	struct ahash_request *req = ahash_request_cast(areq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	unsigned int blocksize =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 		crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	 * Save hash to use as input to next op if incremental. Might be copying
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	 * too much, but that's easier than figuring out actual digest size here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	memcpy(rctx->incr_hash, rctx->msg_buf.digest, MAX_DIGEST_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	flow_log("%s() blocksize:%u digestsize:%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 		 __func__, blocksize, ctx->digestsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	atomic64_add(ctx->digestsize, &iproc_priv.bytes_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	if (rctx->is_final && (rctx->total_sent == rctx->total_todo))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 		ahash_req_done(rctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)  * spu_aead_rx_sg_create() - Build up the scatterlist of buffers used to receive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)  * a SPU response message for an AEAD request. Includes buffers to catch SPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)  * message headers and the response data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)  * @mssg:	mailbox message containing the receive sg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)  * @rctx:	crypto request context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)  * @rx_frag_num: number of scatterlist elements required to hold the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)  *		SPU response message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)  * @assoc_len:	Length of associated data included in the crypto request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)  * @ret_iv_len: Length of IV returned in response
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)  * @resp_len:	Number of bytes of response data expected to be written to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)  *              dst buffer from crypto API
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)  * @digestsize: Length of hash digest, in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)  * @stat_pad_len: Number of bytes required to pad the STAT field to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)  *		a 4-byte boundary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)  * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)  * when the request completes, whether the request is handled successfully or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)  * there is an error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)  * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)  *   0 if successful
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)  *   < 0 if an error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) static int spu_aead_rx_sg_create(struct brcm_message *mssg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 				 struct aead_request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 				 struct iproc_reqctx_s *rctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 				 u8 rx_frag_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 				 unsigned int assoc_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 				 u32 ret_iv_len, unsigned int resp_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 				 unsigned int digestsize, u32 stat_pad_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	struct spu_hw *spu = &iproc_priv.spu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	struct scatterlist *sg;	/* used to build sgs in mbox message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	struct iproc_ctx_s *ctx = rctx->ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	u32 datalen;		/* Number of bytes of response data expected */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	u32 assoc_buf_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	u8 data_padlen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	if (ctx->is_rfc4543) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 		/* RFC4543: only pad after data, not after AAD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 		data_padlen = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 							  assoc_len + resp_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 		assoc_buf_len = assoc_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 		data_padlen = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 							  resp_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 		assoc_buf_len = spu->spu_assoc_resp_len(ctx->cipher.mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 						assoc_len, ret_iv_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 						rctx->is_encrypt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	if (ctx->cipher.mode == CIPHER_MODE_CCM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 		/* ICV (after data) must be in the next 32-bit word for CCM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 		data_padlen += spu->spu_wordalign_padlen(assoc_buf_len +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 							 resp_len +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 							 data_padlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	if (data_padlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 		/* have to catch gcm pad in separate buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 		rx_frag_num++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 				rctx->gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	if (!mssg->spu.dst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	sg = mssg->spu.dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	sg_init_table(sg, rx_frag_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	/* Space for SPU message header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	if (assoc_buf_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 		 * Don't write directly to req->dst, because SPU may pad the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 		 * assoc data in the response
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 		memset(rctx->msg_buf.a.resp_aad, 0, assoc_buf_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 		sg_set_buf(sg++, rctx->msg_buf.a.resp_aad, assoc_buf_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	if (resp_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 		 * Copy in each dst sg entry from request, up to chunksize.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 		 * dst sg catches just the data. digest caught in separate buf.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 		datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 					 rctx->dst_nents, resp_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 		if (datalen < (resp_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 			pr_err("%s(): failed to copy dst sg to mbox msg. expected len %u, datalen %u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 			       __func__, resp_len, datalen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	/* If GCM/CCM data is padded, catch padding in separate buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	if (data_padlen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 		memset(rctx->msg_buf.a.gcmpad, 0, data_padlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 		sg_set_buf(sg++, rctx->msg_buf.a.gcmpad, data_padlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	/* Always catch ICV in separate buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	sg_set_buf(sg++, rctx->msg_buf.digest, digestsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	flow_log("stat_pad_len %u\n", stat_pad_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	if (stat_pad_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 		memset(rctx->msg_buf.rx_stat_pad, 0, stat_pad_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 		sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)  * spu_aead_tx_sg_create() - Build up the scatterlist of buffers used to send a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)  * SPU request message for an AEAD request. Includes SPU message headers and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)  * request data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)  * @mssg:	mailbox message containing the transmit sg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)  * @rctx:	crypto request context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)  * @tx_frag_num: number of scatterlist elements required to construct the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)  *		SPU request message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)  * @spu_hdr_len: length of SPU message header in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)  * @assoc:	crypto API associated data scatterlist
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)  * @assoc_len:	length of associated data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)  * @assoc_nents: number of scatterlist entries containing assoc data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)  * @aead_iv_len: length of AEAD IV, if included
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)  * @chunksize:	Number of bytes of request data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)  * @aad_pad_len: Number of bytes of padding at end of AAD. For GCM/CCM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)  * @pad_len:	Number of pad bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)  * @incl_icv:	If true, write separate ICV buffer after data and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)  *              any padding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)  * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)  * when the request completes, whether the request is handled successfully or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)  * there is an error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)  * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)  *   0 if successful
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)  *   < 0 if an error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) static int spu_aead_tx_sg_create(struct brcm_message *mssg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 				 struct iproc_reqctx_s *rctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 				 u8 tx_frag_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 				 u32 spu_hdr_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 				 struct scatterlist *assoc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 				 unsigned int assoc_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 				 int assoc_nents,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 				 unsigned int aead_iv_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 				 unsigned int chunksize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 				 u32 aad_pad_len, u32 pad_len, bool incl_icv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	struct spu_hw *spu = &iproc_priv.spu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	struct scatterlist *sg;	/* used to build sgs in mbox message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	struct scatterlist *assoc_sg = assoc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	struct iproc_ctx_s *ctx = rctx->ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	u32 datalen;		/* Number of bytes of data to write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	u32 written;		/* Number of bytes of data written */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	u32 assoc_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	u32 stat_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 				rctx->gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	if (!mssg->spu.src)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	sg = mssg->spu.src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	sg_init_table(sg, tx_frag_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 		   BCM_HDR_LEN + spu_hdr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	if (assoc_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 		/* Copy in each associated data sg entry from request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 		written = spu_msg_sg_add(&sg, &assoc_sg, &assoc_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 					 assoc_nents, assoc_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 		if (written < assoc_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 			pr_err("%s(): failed to copy assoc sg to mbox msg",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 			       __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	if (aead_iv_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 		sg_set_buf(sg++, rctx->msg_buf.iv_ctr, aead_iv_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	if (aad_pad_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 		memset(rctx->msg_buf.a.req_aad_pad, 0, aad_pad_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 		sg_set_buf(sg++, rctx->msg_buf.a.req_aad_pad, aad_pad_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	datalen = chunksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	if ((chunksize > ctx->digestsize) && incl_icv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 		datalen -= ctx->digestsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	if (datalen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 		/* For aead, a single msg should consume the entire src sg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 		written = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 					 rctx->src_nents, datalen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 		if (written < datalen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 			pr_err("%s(): failed to copy src sg to mbox msg",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 			       __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	if (pad_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 		memset(rctx->msg_buf.spu_req_pad, 0, pad_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 		sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	if (incl_icv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 		sg_set_buf(sg++, rctx->msg_buf.digest, ctx->digestsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	stat_len = spu->spu_tx_status_len();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	if (stat_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 		memset(rctx->msg_buf.tx_stat, 0, stat_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 		sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)  * handle_aead_req() - Submit a SPU request message for the next chunk of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)  * current AEAD request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247)  * @rctx:  Crypto request context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)  * Unlike other operation types, we assume the length of the request fits in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)  * a single SPU request message. aead_enqueue() makes sure this is true.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)  * Comments for other op types regarding threads applies here as well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)  * Unlike incremental hash ops, where the spu returns the entire hash for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)  * truncated algs like sha-224, the SPU returns just the truncated hash in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)  * response to aead requests. So digestsize is always ctx->digestsize here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257)  * Return: -EINPROGRESS: crypto request has been accepted and result will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)  *			 returned asynchronously
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)  *         Any other value indicates an error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) static int handle_aead_req(struct iproc_reqctx_s *rctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	struct spu_hw *spu = &iproc_priv.spu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	struct crypto_async_request *areq = rctx->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	struct aead_request *req = container_of(areq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 						struct aead_request, base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	struct iproc_ctx_s *ctx = rctx->ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	unsigned int chunksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	unsigned int resp_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	u32 spu_hdr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	u32 db_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	u32 stat_pad_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	u32 pad_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	struct brcm_message *mssg;	/* mailbox message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	struct spu_request_opts req_opts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	struct spu_cipher_parms cipher_parms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	struct spu_hash_parms hash_parms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	struct spu_aead_parms aead_parms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	int assoc_nents = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	bool incl_icv = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	unsigned int digestsize = ctx->digestsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	/* number of entries in src and dst sg. Always includes SPU msg header.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	u8 rx_frag_num = 2;	/* and STATUS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	u8 tx_frag_num = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	/* doing the whole thing at once */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	chunksize = rctx->total_todo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	flow_log("%s: chunksize %u\n", __func__, chunksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	memset(&req_opts, 0, sizeof(req_opts));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	memset(&hash_parms, 0, sizeof(hash_parms));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	memset(&aead_parms, 0, sizeof(aead_parms));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	req_opts.is_inbound = !(rctx->is_encrypt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	req_opts.auth_first = ctx->auth_first;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	req_opts.is_aead = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	req_opts.is_esp = ctx->is_esp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	cipher_parms.alg = ctx->cipher.alg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	cipher_parms.mode = ctx->cipher.mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	cipher_parms.type = ctx->cipher_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	cipher_parms.key_buf = ctx->enckey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	cipher_parms.key_len = ctx->enckeylen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	cipher_parms.iv_buf = rctx->msg_buf.iv_ctr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	cipher_parms.iv_len = rctx->iv_ctr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	hash_parms.alg = ctx->auth.alg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	hash_parms.mode = ctx->auth.mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	hash_parms.type = HASH_TYPE_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	hash_parms.key_buf = (u8 *)ctx->authkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	hash_parms.key_len = ctx->authkeylen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	hash_parms.digestsize = digestsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	if ((ctx->auth.alg == HASH_ALG_SHA224) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	    (ctx->authkeylen < SHA224_DIGEST_SIZE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 		hash_parms.key_len = SHA224_DIGEST_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	aead_parms.assoc_size = req->assoclen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 	if (ctx->is_esp && !ctx->is_rfc4543) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 		 * 8-byte IV is included assoc data in request. SPU2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 		 * expects AAD to include just SPI and seqno. So
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 		 * subtract off the IV len.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 		aead_parms.assoc_size -= GCM_RFC4106_IV_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 		if (rctx->is_encrypt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 			aead_parms.return_iv = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 			aead_parms.ret_iv_len = GCM_RFC4106_IV_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 			aead_parms.ret_iv_off = GCM_ESP_SALT_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 		aead_parms.ret_iv_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	 * Count number of sg entries from the crypto API request that are to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	 * be included in this mailbox message. For dst sg, don't count space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	 * for digest. Digest gets caught in a separate buffer and copied back
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	 * to dst sg when processing response.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	if (aead_parms.assoc_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 		assoc_nents = spu_sg_count(rctx->assoc, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 					   aead_parms.assoc_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	mssg = &rctx->mb_mssg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	rctx->total_sent = chunksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	rctx->src_sent = chunksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	if (spu->spu_assoc_resp_len(ctx->cipher.mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 				    aead_parms.assoc_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 				    aead_parms.ret_iv_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 				    rctx->is_encrypt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 		rx_frag_num++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	aead_parms.iv_len = spu->spu_aead_ivlen(ctx->cipher.mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 						rctx->iv_ctr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	if (ctx->auth.alg == HASH_ALG_AES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 		hash_parms.type = (enum hash_type)ctx->cipher_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	/* General case AAD padding (CCM and RFC4543 special cases below) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 						 aead_parms.assoc_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	/* General case data padding (CCM decrypt special case below) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 							   chunksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	if (ctx->cipher.mode == CIPHER_MODE_CCM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 		 * for CCM, AAD len + 2 (rather than AAD len) needs to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 		 * 128-bit aligned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 		aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 					 ctx->cipher.mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 					 aead_parms.assoc_size + 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 		 * And when decrypting CCM, need to pad without including
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 		 * size of ICV which is tacked on to end of chunk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 		if (!rctx->is_encrypt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 			aead_parms.data_pad_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 				spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 							chunksize - digestsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 		/* CCM also requires software to rewrite portions of IV: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 		spu->spu_ccm_update_iv(digestsize, &cipher_parms, req->assoclen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 				       chunksize, rctx->is_encrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 				       ctx->is_esp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	if (ctx->is_rfc4543) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 		 * RFC4543: data is included in AAD, so don't pad after AAD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 		 * and pad data based on both AAD + data size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 		aead_parms.aad_pad_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 		if (!rctx->is_encrypt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 			aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 					ctx->cipher.mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 					aead_parms.assoc_size + chunksize -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 					digestsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 			aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 					ctx->cipher.mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 					aead_parms.assoc_size + chunksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 		req_opts.is_rfc4543 = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 	if (spu_req_incl_icv(ctx->cipher.mode, rctx->is_encrypt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 		incl_icv = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 		tx_frag_num++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 		/* Copy ICV from end of src scatterlist to digest buf */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 		sg_copy_part_to_buf(req->src, rctx->msg_buf.digest, digestsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 				    req->assoclen + rctx->total_sent -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 				    digestsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	atomic64_add(chunksize, &iproc_priv.bytes_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	flow_log("%s()-sent chunksize:%u\n", __func__, chunksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	/* Prepend SPU header with type 3 BCM header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 					      BCM_HDR_LEN, &req_opts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 					      &cipher_parms, &hash_parms,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 					      &aead_parms, chunksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	/* Determine total length of padding. Put all padding in one buffer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 	db_size = spu_real_db_size(aead_parms.assoc_size, aead_parms.iv_len, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 				   chunksize, aead_parms.aad_pad_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 				   aead_parms.data_pad_len, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	stat_pad_len = spu->spu_wordalign_padlen(db_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 	if (stat_pad_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 		rx_frag_num++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	pad_len = aead_parms.data_pad_len + stat_pad_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 	if (pad_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 		tx_frag_num++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 		spu->spu_request_pad(rctx->msg_buf.spu_req_pad,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 				     aead_parms.data_pad_len, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 				     ctx->auth.alg, ctx->auth.mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 				     rctx->total_sent, stat_pad_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 	spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 			      spu_hdr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 	dump_sg(rctx->assoc, 0, aead_parms.assoc_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	packet_dump("    aead iv: ", rctx->msg_buf.iv_ctr, aead_parms.iv_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	packet_log("BD:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	dump_sg(rctx->src_sg, rctx->src_skip, chunksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	packet_dump("   pad: ", rctx->msg_buf.spu_req_pad, pad_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 	 * Build mailbox message containing SPU request msg and rx buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	 * to catch response message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	memset(mssg, 0, sizeof(*mssg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	mssg->type = BRCM_MESSAGE_SPU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 	mssg->ctx = rctx;	/* Will be returned in response */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 	/* Create rx scatterlist to catch result */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	rx_frag_num += rctx->dst_nents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 	resp_len = chunksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 	 * Always catch ICV in separate buffer. Have to for GCM/CCM because of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	 * padding. Have to for SHA-224 and other truncated SHAs because SPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	 * sends entire digest back.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	rx_frag_num++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	if (((ctx->cipher.mode == CIPHER_MODE_GCM) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	     (ctx->cipher.mode == CIPHER_MODE_CCM)) && !rctx->is_encrypt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 		 * Input is ciphertxt plus ICV, but ICV not incl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 		 * in output.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 		resp_len -= ctx->digestsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 		if (resp_len == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 			/* no rx frags to catch output data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 			rx_frag_num -= rctx->dst_nents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	err = spu_aead_rx_sg_create(mssg, req, rctx, rx_frag_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 				    aead_parms.assoc_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 				    aead_parms.ret_iv_len, resp_len, digestsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 				    stat_pad_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	/* Create tx scatterlist containing SPU request message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	tx_frag_num += rctx->src_nents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 	tx_frag_num += assoc_nents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 	if (aead_parms.aad_pad_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 		tx_frag_num++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	if (aead_parms.iv_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 		tx_frag_num++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 	if (spu->spu_tx_status_len())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 		tx_frag_num++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 	err = spu_aead_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 				    rctx->assoc, aead_parms.assoc_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 				    assoc_nents, aead_parms.iv_len, chunksize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 				    aead_parms.aad_pad_len, pad_len, incl_icv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 	if (unlikely(err < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 	return -EINPROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)  * handle_aead_resp() - Process a SPU response message for an AEAD request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)  * @rctx:  Crypto request context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) static void handle_aead_resp(struct iproc_reqctx_s *rctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	struct spu_hw *spu = &iproc_priv.spu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	struct crypto_async_request *areq = rctx->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 	struct aead_request *req = container_of(areq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 						struct aead_request, base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 	struct iproc_ctx_s *ctx = rctx->ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	u32 payload_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	unsigned int icv_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 	u32 result_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	/* See how much data was returned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	payload_len = spu->spu_payload_length(rctx->msg_buf.spu_resp_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 	flow_log("payload_len %u\n", payload_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 	/* only count payload */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	atomic64_add(payload_len, &iproc_priv.bytes_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 	if (req->assoclen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 		packet_dump("  assoc_data ", rctx->msg_buf.a.resp_aad,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 			    req->assoclen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 	 * Copy the ICV back to the destination
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 	 * buffer. In decrypt case, SPU gives us back the digest, but crypto
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	 * API doesn't expect ICV in dst buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	result_len = req->cryptlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	if (rctx->is_encrypt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 		icv_offset = req->assoclen + rctx->total_sent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 		packet_dump("  ICV: ", rctx->msg_buf.digest, ctx->digestsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 		flow_log("copying ICV to dst sg at offset %u\n", icv_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 		sg_copy_part_from_buf(req->dst, rctx->msg_buf.digest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 				      ctx->digestsize, icv_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 		result_len += ctx->digestsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	packet_log("response data:  ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	dump_sg(req->dst, req->assoclen, result_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 	atomic_inc(&iproc_priv.op_counts[SPU_OP_AEAD]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	if (ctx->cipher.alg == CIPHER_ALG_AES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 		if (ctx->cipher.mode == CIPHER_MODE_CCM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 			atomic_inc(&iproc_priv.aead_cnt[AES_CCM]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 		else if (ctx->cipher.mode == CIPHER_MODE_GCM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 			atomic_inc(&iproc_priv.aead_cnt[AES_GCM]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 			atomic_inc(&iproc_priv.aead_cnt[AUTHENC]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 		atomic_inc(&iproc_priv.aead_cnt[AUTHENC]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)  * spu_chunk_cleanup() - Do cleanup after processing one chunk of a request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586)  * @rctx:  request context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588)  * Mailbox scatterlists are allocated for each chunk. So free them after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)  * processing each chunk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) static void spu_chunk_cleanup(struct iproc_reqctx_s *rctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 	/* mailbox message used to tx request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 	struct brcm_message *mssg = &rctx->mb_mssg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 	kfree(mssg->spu.src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 	kfree(mssg->spu.dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	memset(mssg, 0, sizeof(struct brcm_message));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)  * finish_req() - Used to invoke the complete callback from the requester when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)  * a request has been handled asynchronously.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604)  * @rctx:  Request context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605)  * @err:   Indicates whether the request was successful or not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607)  * Ensures that cleanup has been done for request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) static void finish_req(struct iproc_reqctx_s *rctx, int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	struct crypto_async_request *areq = rctx->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	flow_log("%s() err:%d\n\n", __func__, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 	/* No harm done if already called */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 	spu_chunk_cleanup(rctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	if (areq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 		areq->complete(areq, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623)  * spu_rx_callback() - Callback from mailbox framework with a SPU response.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624)  * @cl:		mailbox client structure for SPU driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625)  * @msg:	mailbox message containing SPU response
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) static void spu_rx_callback(struct mbox_client *cl, void *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 	struct spu_hw *spu = &iproc_priv.spu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 	struct brcm_message *mssg = msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	struct iproc_reqctx_s *rctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	rctx = mssg->ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 	if (unlikely(!rctx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 		/* This is fatal */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 		pr_err("%s(): no request context", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 		err = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 		goto cb_finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 	/* process the SPU status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	err = spu->spu_status_process(rctx->msg_buf.rx_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 	if (err != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 		if (err == SPU_INVALID_ICV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 			atomic_inc(&iproc_priv.bad_icv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 		err = -EBADMSG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 		goto cb_finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 	/* Process the SPU response message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 	switch (rctx->ctx->alg->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 	case CRYPTO_ALG_TYPE_SKCIPHER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 		handle_skcipher_resp(rctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 	case CRYPTO_ALG_TYPE_AHASH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 		handle_ahash_resp(rctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 	case CRYPTO_ALG_TYPE_AEAD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 		handle_aead_resp(rctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 		err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 		goto cb_finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	 * If this response does not complete the request, then send the next
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 	 * request chunk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	if (rctx->total_sent < rctx->total_todo) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 		/* Deallocate anything specific to previous chunk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 		spu_chunk_cleanup(rctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 		switch (rctx->ctx->alg->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 		case CRYPTO_ALG_TYPE_SKCIPHER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 			err = handle_skcipher_req(rctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 		case CRYPTO_ALG_TYPE_AHASH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 			err = handle_ahash_req(rctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 			if (err == -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 				 * we saved data in hash carry, but tell crypto
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 				 * API we successfully completed request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 				err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 		case CRYPTO_ALG_TYPE_AEAD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 			err = handle_aead_req(rctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 			err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 		if (err == -EINPROGRESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 			/* Successfully submitted request for next chunk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) cb_finish:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 	finish_req(rctx, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) /* ==================== Kernel Cryptographic API ==================== */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707)  * skcipher_enqueue() - Handle skcipher encrypt or decrypt request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708)  * @req:	Crypto API request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709)  * @encrypt:	true if encrypting; false if decrypting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711)  * Return: -EINPROGRESS if request accepted and result will be returned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712)  *			asynchronously
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713)  *	   < 0 if an error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) static int skcipher_enqueue(struct skcipher_request *req, bool encrypt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 	struct iproc_reqctx_s *rctx = skcipher_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	struct iproc_ctx_s *ctx =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 	    crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 	flow_log("%s() enc:%u\n", __func__, encrypt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 	rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 	rctx->parent = &req->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 	rctx->is_encrypt = encrypt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 	rctx->bd_suppress = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 	rctx->total_todo = req->cryptlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	rctx->src_sent = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 	rctx->total_sent = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 	rctx->total_received = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 	rctx->ctx = ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 	/* Initialize current position in src and dst scatterlists */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 	rctx->src_sg = req->src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 	rctx->src_nents = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 	rctx->src_skip = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 	rctx->dst_sg = req->dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 	rctx->dst_nents = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 	rctx->dst_skip = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 	if (ctx->cipher.mode == CIPHER_MODE_CBC ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 	    ctx->cipher.mode == CIPHER_MODE_CTR ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 	    ctx->cipher.mode == CIPHER_MODE_OFB ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 	    ctx->cipher.mode == CIPHER_MODE_XTS ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 	    ctx->cipher.mode == CIPHER_MODE_GCM ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 	    ctx->cipher.mode == CIPHER_MODE_CCM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 		rctx->iv_ctr_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 		    crypto_skcipher_ivsize(crypto_skcipher_reqtfm(req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 		memcpy(rctx->msg_buf.iv_ctr, req->iv, rctx->iv_ctr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 		rctx->iv_ctr_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 	/* Choose a SPU to process this request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 	rctx->chan_idx = select_channel();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 	err = handle_skcipher_req(rctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 	if (err != -EINPROGRESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 		/* synchronous result */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 		spu_chunk_cleanup(rctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) static int des_setkey(struct crypto_skcipher *cipher, const u8 *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 		      unsigned int keylen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 	struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 	err = verify_skcipher_des_key(cipher, key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 	ctx->cipher_type = CIPHER_TYPE_DES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) static int threedes_setkey(struct crypto_skcipher *cipher, const u8 *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 			   unsigned int keylen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 	struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 	err = verify_skcipher_des3_key(cipher, key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 	ctx->cipher_type = CIPHER_TYPE_3DES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) static int aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 		      unsigned int keylen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 	struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 	if (ctx->cipher.mode == CIPHER_MODE_XTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 		/* XTS includes two keys of equal length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 		keylen = keylen / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 	switch (keylen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 	case AES_KEYSIZE_128:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 		ctx->cipher_type = CIPHER_TYPE_AES128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 	case AES_KEYSIZE_192:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 		ctx->cipher_type = CIPHER_TYPE_AES192;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 	case AES_KEYSIZE_256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 		ctx->cipher_type = CIPHER_TYPE_AES256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 	WARN_ON((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 		((ctx->max_payload % AES_BLOCK_SIZE) != 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) static int skcipher_setkey(struct crypto_skcipher *cipher, const u8 *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 			     unsigned int keylen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 	struct spu_hw *spu = &iproc_priv.spu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 	struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 	struct spu_cipher_parms cipher_parms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 	u32 alloc_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 	flow_log("skcipher_setkey() keylen: %d\n", keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 	flow_dump("  key: ", key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 	switch (ctx->cipher.alg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 	case CIPHER_ALG_DES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 		err = des_setkey(cipher, key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 	case CIPHER_ALG_3DES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 		err = threedes_setkey(cipher, key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 	case CIPHER_ALG_AES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 		err = aes_setkey(cipher, key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 		pr_err("%s() Error: unknown cipher alg\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 		err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 	memcpy(ctx->enckey, key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 	ctx->enckeylen = keylen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 	/* SPU needs XTS keys in the reverse order the crypto API presents */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 	if ((ctx->cipher.alg == CIPHER_ALG_AES) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 	    (ctx->cipher.mode == CIPHER_MODE_XTS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 		unsigned int xts_keylen = keylen / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 		memcpy(ctx->enckey, key + xts_keylen, xts_keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 		memcpy(ctx->enckey + xts_keylen, key, xts_keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 	if (spu->spu_type == SPU_TYPE_SPUM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 		alloc_len = BCM_HDR_LEN + SPU_HEADER_ALLOC_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 	else if (spu->spu_type == SPU_TYPE_SPU2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 		alloc_len = BCM_HDR_LEN + SPU2_HEADER_ALLOC_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 	memset(ctx->bcm_spu_req_hdr, 0, alloc_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 	cipher_parms.iv_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 	cipher_parms.iv_len = crypto_skcipher_ivsize(cipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 	flow_log("%s: iv_len %u\n", __func__, cipher_parms.iv_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 	cipher_parms.alg = ctx->cipher.alg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 	cipher_parms.mode = ctx->cipher.mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 	cipher_parms.type = ctx->cipher_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 	cipher_parms.key_buf = ctx->enckey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 	cipher_parms.key_len = ctx->enckeylen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 	/* Prepend SPU request message with BCM header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 	memcpy(ctx->bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 	ctx->spu_req_hdr_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 	    spu->spu_cipher_req_init(ctx->bcm_spu_req_hdr + BCM_HDR_LEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 				     &cipher_parms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 	ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 							  ctx->enckeylen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 							  false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 	atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_CIPHER]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) static int skcipher_encrypt(struct skcipher_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 	flow_log("skcipher_encrypt() nbytes:%u\n", req->cryptlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 	return skcipher_enqueue(req, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) static int skcipher_decrypt(struct skcipher_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 	flow_log("skcipher_decrypt() nbytes:%u\n", req->cryptlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 	return skcipher_enqueue(req, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) static int ahash_enqueue(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 	const char *alg_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 	flow_log("ahash_enqueue() nbytes:%u\n", req->nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 	rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 	rctx->parent = &req->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 	rctx->ctx = ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 	rctx->bd_suppress = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 	memset(&rctx->mb_mssg, 0, sizeof(struct brcm_message));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 	/* Initialize position in src scatterlist */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 	rctx->src_sg = req->src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 	rctx->src_skip = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 	rctx->src_nents = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 	rctx->dst_sg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 	rctx->dst_skip = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 	rctx->dst_nents = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 	/* SPU2 hardware does not compute hash of zero length data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 	if ((rctx->is_final == 1) && (rctx->total_todo == 0) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 	    (iproc_priv.spu.spu_type == SPU_TYPE_SPU2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 		alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 		flow_log("Doing %sfinal %s zero-len hash request in software\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 			 rctx->is_final ? "" : "non-", alg_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 		err = do_shash((unsigned char *)alg_name, req->result,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 			       NULL, 0, NULL, 0, ctx->authkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 			       ctx->authkeylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 		if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 			flow_log("Hash request failed with error %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 	/* Choose a SPU to process this request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 	rctx->chan_idx = select_channel();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 	err = handle_ahash_req(rctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 	if (err != -EINPROGRESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 		/* synchronous result */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 		spu_chunk_cleanup(rctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 	if (err == -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 		 * we saved data in hash carry, but tell crypto API
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 		 * we successfully completed request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 		err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) static int __ahash_init(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 	struct spu_hw *spu = &iproc_priv.spu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 	flow_log("%s()\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 	/* Initialize the context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 	rctx->hash_carry_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 	rctx->is_final = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 	rctx->total_todo = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 	rctx->src_sent = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 	rctx->total_sent = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 	rctx->total_received = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 	ctx->digestsize = crypto_ahash_digestsize(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 	/* If we add a hash whose digest is larger, catch it here. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 	WARN_ON(ctx->digestsize > MAX_DIGEST_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 	rctx->is_sw_hmac = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 	ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 							  true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992)  * spu_no_incr_hash() - Determine whether incremental hashing is supported.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993)  * @ctx:  Crypto session context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995)  * SPU-2 does not support incremental hashing (we'll have to revisit and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996)  * condition based on chip revision or device tree entry if future versions do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997)  * support incremental hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999)  * SPU-M also doesn't support incremental hashing of AES-XCBC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001)  * Return: true if incremental hashing is not supported
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002)  *         false otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) static bool spu_no_incr_hash(struct iproc_ctx_s *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 	struct spu_hw *spu = &iproc_priv.spu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 	if (spu->spu_type == SPU_TYPE_SPU2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 	if ((ctx->auth.alg == HASH_ALG_AES) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 	    (ctx->auth.mode == HASH_MODE_XCBC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 	/* Otherwise, incremental hashing is supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) static int ahash_init(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 	const char *alg_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 	struct crypto_shash *hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 	gfp_t gfp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 	if (spu_no_incr_hash(ctx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 		 * If we get an incremental hashing request and it's not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 		 * supported by the hardware, we need to handle it in software
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 		 * by calling synchronous hash functions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 		alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 		hash = crypto_alloc_shash(alg_name, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 		if (IS_ERR(hash)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 			ret = PTR_ERR(hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 			goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 		gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 		ctx->shash = kmalloc(sizeof(*ctx->shash) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 				     crypto_shash_descsize(hash), gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 		if (!ctx->shash) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 			ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 			goto err_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 		ctx->shash->tfm = hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 		/* Set the key using data we already have from setkey */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 		if (ctx->authkeylen > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 			ret = crypto_shash_setkey(hash, ctx->authkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 						  ctx->authkeylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 			if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 				goto err_shash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 		/* Initialize hash w/ this key and other params */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 		ret = crypto_shash_init(ctx->shash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 			goto err_shash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 		/* Otherwise call the internal function which uses SPU hw */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 		ret = __ahash_init(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) err_shash:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 	kfree(ctx->shash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) err_hash:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 	crypto_free_shash(hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) static int __ahash_update(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 	flow_log("ahash_update() nbytes:%u\n", req->nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 	if (!req->nbytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 	rctx->total_todo += req->nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 	rctx->src_sent = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 	return ahash_enqueue(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) static int ahash_update(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 	u8 *tmpbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 	int nents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 	gfp_t gfp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 	if (spu_no_incr_hash(ctx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 		 * If we get an incremental hashing request and it's not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 		 * supported by the hardware, we need to handle it in software
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 		 * by calling synchronous hash functions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 		if (req->src)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 			nents = sg_nents(req->src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 		/* Copy data from req scatterlist to tmp buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 		gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 		tmpbuf = kmalloc(req->nbytes, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 		if (!tmpbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 		if (sg_copy_to_buffer(req->src, nents, tmpbuf, req->nbytes) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 				req->nbytes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 			kfree(tmpbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 		/* Call synchronous update */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 		ret = crypto_shash_update(ctx->shash, tmpbuf, req->nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 		kfree(tmpbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 		/* Otherwise call the internal function which uses SPU hw */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 		ret = __ahash_update(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) static int __ahash_final(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 	flow_log("ahash_final() nbytes:%u\n", req->nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 	rctx->is_final = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 	return ahash_enqueue(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) static int ahash_final(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 	if (spu_no_incr_hash(ctx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 		 * If we get an incremental hashing request and it's not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 		 * supported by the hardware, we need to handle it in software
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 		 * by calling synchronous hash functions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 		ret = crypto_shash_final(ctx->shash, req->result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 		/* Done with hash, can deallocate it now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 		crypto_free_shash(ctx->shash->tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 		kfree(ctx->shash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 		/* Otherwise call the internal function which uses SPU hw */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 		ret = __ahash_final(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) static int __ahash_finup(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 	flow_log("ahash_finup() nbytes:%u\n", req->nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 	rctx->total_todo += req->nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 	rctx->src_sent = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 	rctx->is_final = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 	return ahash_enqueue(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) static int ahash_finup(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 	u8 *tmpbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 	int nents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 	gfp_t gfp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 	if (spu_no_incr_hash(ctx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 		 * If we get an incremental hashing request and it's not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 		 * supported by the hardware, we need to handle it in software
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 		 * by calling synchronous hash functions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 		if (req->src) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 			nents = sg_nents(req->src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 			ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 			goto ahash_finup_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 		/* Copy data from req scatterlist to tmp buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 		gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 		tmpbuf = kmalloc(req->nbytes, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 		if (!tmpbuf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 			ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 			goto ahash_finup_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 		if (sg_copy_to_buffer(req->src, nents, tmpbuf, req->nbytes) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 				req->nbytes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 			ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 			goto ahash_finup_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 		/* Call synchronous update */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 		ret = crypto_shash_finup(ctx->shash, tmpbuf, req->nbytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 					 req->result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 		/* Otherwise call the internal function which uses SPU hw */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 		return __ahash_finup(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) ahash_finup_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 	kfree(tmpbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) ahash_finup_exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 	/* Done with hash, can deallocate it now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 	crypto_free_shash(ctx->shash->tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 	kfree(ctx->shash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) static int ahash_digest(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 	flow_log("ahash_digest() nbytes:%u\n", req->nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 	/* whole thing at once */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 	err = __ahash_init(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 	if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 		err = __ahash_finup(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 			unsigned int keylen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(ahash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 	flow_log("%s() ahash:%p key:%p keylen:%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 		 __func__, ahash, key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 	flow_dump("  key: ", key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 	if (ctx->auth.alg == HASH_ALG_AES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 		switch (keylen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 		case AES_KEYSIZE_128:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 			ctx->cipher_type = CIPHER_TYPE_AES128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 		case AES_KEYSIZE_192:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 			ctx->cipher_type = CIPHER_TYPE_AES192;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 		case AES_KEYSIZE_256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 			ctx->cipher_type = CIPHER_TYPE_AES256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 			pr_err("%s() Error: Invalid key length\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 		pr_err("%s() Error: unknown hash alg\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 	memcpy(ctx->authkey, key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 	ctx->authkeylen = keylen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) static int ahash_export(struct ahash_request *req, void *out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 	const struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 	struct spu_hash_export_s *spu_exp = (struct spu_hash_export_s *)out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 	spu_exp->total_todo = rctx->total_todo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 	spu_exp->total_sent = rctx->total_sent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 	spu_exp->is_sw_hmac = rctx->is_sw_hmac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 	memcpy(spu_exp->hash_carry, rctx->hash_carry, sizeof(rctx->hash_carry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 	spu_exp->hash_carry_len = rctx->hash_carry_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 	memcpy(spu_exp->incr_hash, rctx->incr_hash, sizeof(rctx->incr_hash));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) static int ahash_import(struct ahash_request *req, const void *in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 	struct spu_hash_export_s *spu_exp = (struct spu_hash_export_s *)in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 	rctx->total_todo = spu_exp->total_todo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 	rctx->total_sent = spu_exp->total_sent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 	rctx->is_sw_hmac = spu_exp->is_sw_hmac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 	memcpy(rctx->hash_carry, spu_exp->hash_carry, sizeof(rctx->hash_carry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 	rctx->hash_carry_len = spu_exp->hash_carry_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 	memcpy(rctx->incr_hash, spu_exp->incr_hash, sizeof(rctx->incr_hash));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) static int ahash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 			     unsigned int keylen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(ahash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 	unsigned int blocksize =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 		crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 	unsigned int digestsize = crypto_ahash_digestsize(ahash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 	unsigned int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 	flow_log("%s() ahash:%p key:%p keylen:%u blksz:%u digestsz:%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 		 __func__, ahash, key, keylen, blocksize, digestsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 	flow_dump("  key: ", key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 	if (keylen > blocksize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 		switch (ctx->auth.alg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 		case HASH_ALG_MD5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 			rc = do_shash("md5", ctx->authkey, key, keylen, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 				      0, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 		case HASH_ALG_SHA1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 			rc = do_shash("sha1", ctx->authkey, key, keylen, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 				      0, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 		case HASH_ALG_SHA224:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 			rc = do_shash("sha224", ctx->authkey, key, keylen, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 				      0, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 		case HASH_ALG_SHA256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 			rc = do_shash("sha256", ctx->authkey, key, keylen, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 				      0, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 		case HASH_ALG_SHA384:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 			rc = do_shash("sha384", ctx->authkey, key, keylen, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 				      0, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 		case HASH_ALG_SHA512:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 			rc = do_shash("sha512", ctx->authkey, key, keylen, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 				      0, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 		case HASH_ALG_SHA3_224:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 			rc = do_shash("sha3-224", ctx->authkey, key, keylen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 				      NULL, 0, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 		case HASH_ALG_SHA3_256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 			rc = do_shash("sha3-256", ctx->authkey, key, keylen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 				      NULL, 0, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 		case HASH_ALG_SHA3_384:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 			rc = do_shash("sha3-384", ctx->authkey, key, keylen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 				      NULL, 0, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 		case HASH_ALG_SHA3_512:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 			rc = do_shash("sha3-512", ctx->authkey, key, keylen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 				      NULL, 0, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 			pr_err("%s() Error: unknown hash alg\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 		if (rc < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 			pr_err("%s() Error %d computing shash for %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 			       __func__, rc, hash_alg_name[ctx->auth.alg]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 			return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 		ctx->authkeylen = digestsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) 		flow_log("  keylen > digestsize... hashed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 		flow_dump("  newkey: ", ctx->authkey, ctx->authkeylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 		memcpy(ctx->authkey, key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 		ctx->authkeylen = keylen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 	 * Full HMAC operation in SPUM is not verified,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 	 * So keeping the generation of IPAD, OPAD and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) 	 * outer hashing in software.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 	if (iproc_priv.spu.spu_type == SPU_TYPE_SPUM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 		memcpy(ctx->ipad, ctx->authkey, ctx->authkeylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 		memset(ctx->ipad + ctx->authkeylen, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 		       blocksize - ctx->authkeylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 		ctx->authkeylen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 		memcpy(ctx->opad, ctx->ipad, blocksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 		for (index = 0; index < blocksize; index++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 			ctx->ipad[index] ^= HMAC_IPAD_VALUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 			ctx->opad[index] ^= HMAC_OPAD_VALUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 		flow_dump("  ipad: ", ctx->ipad, blocksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 		flow_dump("  opad: ", ctx->opad, blocksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 	ctx->digestsize = digestsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 	atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_HMAC]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) static int ahash_hmac_init(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 	unsigned int blocksize =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 	flow_log("ahash_hmac_init()\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 	/* init the context as a hash */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 	ahash_init(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 	if (!spu_no_incr_hash(ctx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 		/* SPU-M can do incr hashing but needs sw for outer HMAC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 		rctx->is_sw_hmac = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 		ctx->auth.mode = HASH_MODE_HASH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 		/* start with a prepended ipad */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 		memcpy(rctx->hash_carry, ctx->ipad, blocksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 		rctx->hash_carry_len = blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 		rctx->total_todo += blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) static int ahash_hmac_update(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 	flow_log("ahash_hmac_update() nbytes:%u\n", req->nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 	if (!req->nbytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 	return ahash_update(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) static int ahash_hmac_final(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 	flow_log("ahash_hmac_final() nbytes:%u\n", req->nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 	return ahash_final(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) static int ahash_hmac_finup(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 	flow_log("ahash_hmac_finupl() nbytes:%u\n", req->nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 	return ahash_finup(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) static int ahash_hmac_digest(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 	unsigned int blocksize =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 	flow_log("ahash_hmac_digest() nbytes:%u\n", req->nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 	/* Perform initialization and then call finup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 	__ahash_init(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) 	if (iproc_priv.spu.spu_type == SPU_TYPE_SPU2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 		 * SPU2 supports full HMAC implementation in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 		 * hardware, need not to generate IPAD, OPAD and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 		 * outer hash in software.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 		 * Only for hash key len > hash block size, SPU2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 		 * expects to perform hashing on the key, shorten
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 		 * it to digest size and feed it as hash key.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 		rctx->is_sw_hmac = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 		ctx->auth.mode = HASH_MODE_HMAC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 		rctx->is_sw_hmac = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 		ctx->auth.mode = HASH_MODE_HASH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 		/* start with a prepended ipad */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 		memcpy(rctx->hash_carry, ctx->ipad, blocksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 		rctx->hash_carry_len = blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 		rctx->total_todo += blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 	return __ahash_finup(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) /* aead helpers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) static int aead_need_fallback(struct aead_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 	struct spu_hw *spu = &iproc_priv.spu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 	struct iproc_ctx_s *ctx = crypto_aead_ctx(aead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) 	u32 payload_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 	 * SPU hardware cannot handle the AES-GCM/CCM case where plaintext
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 	 * and AAD are both 0 bytes long. So use fallback in this case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 	if (((ctx->cipher.mode == CIPHER_MODE_GCM) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 	     (ctx->cipher.mode == CIPHER_MODE_CCM)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 	    (req->assoclen == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 		if ((rctx->is_encrypt && (req->cryptlen == 0)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 		    (!rctx->is_encrypt && (req->cryptlen == ctx->digestsize))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 			flow_log("AES GCM/CCM needs fallback for 0 len req\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) 			return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) 	/* SPU-M hardware only supports CCM digest size of 8, 12, or 16 bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 	if ((ctx->cipher.mode == CIPHER_MODE_CCM) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 	    (spu->spu_type == SPU_TYPE_SPUM) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 	    (ctx->digestsize != 8) && (ctx->digestsize != 12) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 	    (ctx->digestsize != 16)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 		flow_log("%s() AES CCM needs fallback for digest size %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 			 __func__, ctx->digestsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 	 * SPU-M on NSP has an issue where AES-CCM hash is not correct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) 	 * when AAD size is 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) 	if ((ctx->cipher.mode == CIPHER_MODE_CCM) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 	    (spu->spu_subtype == SPU_SUBTYPE_SPUM_NSP) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) 	    (req->assoclen == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) 		flow_log("%s() AES_CCM needs fallback for 0 len AAD on NSP\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) 			 __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 	 * RFC4106 and RFC4543 cannot handle the case where AAD is other than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 	 * 16 or 20 bytes long. So use fallback in this case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 	if (ctx->cipher.mode == CIPHER_MODE_GCM &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 	    ctx->cipher.alg == CIPHER_ALG_AES &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 	    rctx->iv_ctr_len == GCM_RFC4106_IV_SIZE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 	    req->assoclen != 16 && req->assoclen != 20) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 		flow_log("RFC4106/RFC4543 needs fallback for assoclen"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 			 " other than 16 or 20 bytes\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 	payload_len = req->cryptlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 	if (spu->spu_type == SPU_TYPE_SPUM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 		payload_len += req->assoclen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 	flow_log("%s() payload len: %u\n", __func__, payload_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 		return payload_len > ctx->max_payload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) static void aead_complete(struct crypto_async_request *areq, int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 	struct aead_request *req =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 	    container_of(areq, struct aead_request, base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) 	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) 	flow_log("%s() err:%d\n", __func__, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) 	areq->tfm = crypto_aead_tfm(aead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) 	areq->complete = rctx->old_complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) 	areq->data = rctx->old_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 	areq->complete(areq, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) static int aead_do_fallback(struct aead_request *req, bool is_encrypt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) 	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) 	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) 	struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 	u32 req_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) 	flow_log("%s() enc:%u\n", __func__, is_encrypt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 	if (ctx->fallback_cipher) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 		/* Store the cipher tfm and then use the fallback tfm */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 		rctx->old_tfm = tfm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 		aead_request_set_tfm(req, ctx->fallback_cipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 		 * Save the callback and chain ourselves in, so we can restore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 		 * the tfm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 		rctx->old_complete = req->base.complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 		rctx->old_data = req->base.data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 		req_flags = aead_request_flags(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 		aead_request_set_callback(req, req_flags, aead_complete, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 		err = is_encrypt ? crypto_aead_encrypt(req) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 		    crypto_aead_decrypt(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 		if (err == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 			 * fallback was synchronous (did not return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 			 * -EINPROGRESS). So restore request state here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) 			aead_request_set_callback(req, req_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) 						  rctx->old_complete, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) 			req->base.data = rctx->old_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) 			aead_request_set_tfm(req, aead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) 			flow_log("%s() fallback completed successfully\n\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 				 __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) 		err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) static int aead_enqueue(struct aead_request *req, bool is_encrypt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 	struct iproc_ctx_s *ctx = crypto_aead_ctx(aead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 	flow_log("%s() enc:%u\n", __func__, is_encrypt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) 	if (req->assoclen > MAX_ASSOC_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 		pr_err
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) 		    ("%s() Error: associated data too long. (%u > %u bytes)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) 		     __func__, req->assoclen, MAX_ASSOC_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) 	rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) 	rctx->parent = &req->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) 	rctx->is_encrypt = is_encrypt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) 	rctx->bd_suppress = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) 	rctx->total_todo = req->cryptlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) 	rctx->src_sent = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) 	rctx->total_sent = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) 	rctx->total_received = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) 	rctx->is_sw_hmac = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) 	rctx->ctx = ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) 	memset(&rctx->mb_mssg, 0, sizeof(struct brcm_message));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) 	/* assoc data is at start of src sg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) 	rctx->assoc = req->src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) 	 * Init current position in src scatterlist to be after assoc data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) 	 * src_skip set to buffer offset where data begins. (Assoc data could
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) 	 * end in the middle of a buffer.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 	if (spu_sg_at_offset(req->src, req->assoclen, &rctx->src_sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 			     &rctx->src_skip) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 		pr_err("%s() Error: Unable to find start of src data\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 		       __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) 	rctx->src_nents = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 	rctx->dst_nents = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 	if (req->dst == req->src) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 		rctx->dst_sg = rctx->src_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) 		rctx->dst_skip = rctx->src_skip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) 		 * Expect req->dst to have room for assoc data followed by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) 		 * output data and ICV, if encrypt. So initialize dst_sg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) 		 * to point beyond assoc len offset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 		if (spu_sg_at_offset(req->dst, req->assoclen, &rctx->dst_sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 				     &rctx->dst_skip) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 			pr_err("%s() Error: Unable to find start of dst data\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 			       __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 	if (ctx->cipher.mode == CIPHER_MODE_CBC ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) 	    ctx->cipher.mode == CIPHER_MODE_CTR ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) 	    ctx->cipher.mode == CIPHER_MODE_OFB ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 	    ctx->cipher.mode == CIPHER_MODE_XTS ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) 	    ctx->cipher.mode == CIPHER_MODE_GCM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) 		rctx->iv_ctr_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 			ctx->salt_len +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) 			crypto_aead_ivsize(crypto_aead_reqtfm(req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) 	} else if (ctx->cipher.mode == CIPHER_MODE_CCM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) 		rctx->iv_ctr_len = CCM_AES_IV_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 		rctx->iv_ctr_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 	rctx->hash_carry_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) 	flow_log("  src sg: %p\n", req->src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 	flow_log("  rctx->src_sg: %p, src_skip %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 		 rctx->src_sg, rctx->src_skip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) 	flow_log("  assoc:  %p, assoclen %u\n", rctx->assoc, req->assoclen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) 	flow_log("  dst sg: %p\n", req->dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) 	flow_log("  rctx->dst_sg: %p, dst_skip %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) 		 rctx->dst_sg, rctx->dst_skip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) 	flow_log("  iv_ctr_len:%u\n", rctx->iv_ctr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) 	flow_dump("  iv: ", req->iv, rctx->iv_ctr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) 	flow_log("  authkeylen:%u\n", ctx->authkeylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) 	flow_log("  is_esp: %s\n", ctx->is_esp ? "yes" : "no");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) 	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) 		flow_log("  max_payload infinite");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) 		flow_log("  max_payload: %u\n", ctx->max_payload);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) 	if (unlikely(aead_need_fallback(req)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) 		return aead_do_fallback(req, is_encrypt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) 	 * Do memory allocations for request after fallback check, because if we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) 	 * do fallback, we won't call finish_req() to dealloc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) 	if (rctx->iv_ctr_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) 		if (ctx->salt_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) 			memcpy(rctx->msg_buf.iv_ctr + ctx->salt_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) 			       ctx->salt, ctx->salt_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) 		memcpy(rctx->msg_buf.iv_ctr + ctx->salt_offset + ctx->salt_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) 		       req->iv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) 		       rctx->iv_ctr_len - ctx->salt_len - ctx->salt_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) 	rctx->chan_idx = select_channel();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) 	err = handle_aead_req(rctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) 	if (err != -EINPROGRESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) 		/* synchronous result */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) 		spu_chunk_cleanup(rctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) static int aead_authenc_setkey(struct crypto_aead *cipher,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) 			       const u8 *key, unsigned int keylen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) 	struct spu_hw *spu = &iproc_priv.spu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) 	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) 	struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) 	struct crypto_authenc_keys keys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) 	flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) 		 keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) 	flow_dump("  key: ", key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) 	ret = crypto_authenc_extractkeys(&keys, key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) 		goto badkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) 	if (keys.enckeylen > MAX_KEY_SIZE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) 	    keys.authkeylen > MAX_KEY_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) 		goto badkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) 	ctx->enckeylen = keys.enckeylen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) 	ctx->authkeylen = keys.authkeylen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) 	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) 	/* May end up padding auth key. So make sure it's zeroed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) 	memset(ctx->authkey, 0, sizeof(ctx->authkey));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) 	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) 	switch (ctx->alg->cipher_info.alg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) 	case CIPHER_ALG_DES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) 		if (verify_aead_des_key(cipher, keys.enckey, keys.enckeylen))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) 		ctx->cipher_type = CIPHER_TYPE_DES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) 	case CIPHER_ALG_3DES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) 		if (verify_aead_des3_key(cipher, keys.enckey, keys.enckeylen))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) 		ctx->cipher_type = CIPHER_TYPE_3DES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) 	case CIPHER_ALG_AES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) 		switch (ctx->enckeylen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) 		case AES_KEYSIZE_128:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) 			ctx->cipher_type = CIPHER_TYPE_AES128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) 		case AES_KEYSIZE_192:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) 			ctx->cipher_type = CIPHER_TYPE_AES192;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) 		case AES_KEYSIZE_256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) 			ctx->cipher_type = CIPHER_TYPE_AES256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) 			goto badkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) 		pr_err("%s() Error: Unknown cipher alg\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) 	flow_log("  enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) 		 ctx->authkeylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) 	flow_dump("  enc: ", ctx->enckey, ctx->enckeylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) 	flow_dump("  auth: ", ctx->authkey, ctx->authkeylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) 	/* setkey the fallback just in case we needto use it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) 	if (ctx->fallback_cipher) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) 		flow_log("  running fallback setkey()\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) 		ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) 		ctx->fallback_cipher->base.crt_flags |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) 		    tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) 		ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) 			flow_log("  fallback setkey() returned:%d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) 	ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) 							  ctx->enckeylen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) 							  false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) 	atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_AEAD]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) badkey:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) 	ctx->enckeylen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) 	ctx->authkeylen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) 	ctx->digestsize = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) 	return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) static int aead_gcm_ccm_setkey(struct crypto_aead *cipher,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) 			       const u8 *key, unsigned int keylen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) 	struct spu_hw *spu = &iproc_priv.spu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) 	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) 	struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) 	flow_log("%s() keylen:%u\n", __func__, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) 	flow_dump("  key: ", key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) 	if (!ctx->is_esp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) 		ctx->digestsize = keylen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) 	ctx->enckeylen = keylen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) 	ctx->authkeylen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) 	switch (ctx->enckeylen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) 	case AES_KEYSIZE_128:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) 		ctx->cipher_type = CIPHER_TYPE_AES128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) 	case AES_KEYSIZE_192:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) 		ctx->cipher_type = CIPHER_TYPE_AES192;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) 	case AES_KEYSIZE_256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) 		ctx->cipher_type = CIPHER_TYPE_AES256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) 		goto badkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) 	memcpy(ctx->enckey, key, ctx->enckeylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) 	flow_log("  enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) 		 ctx->authkeylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) 	flow_dump("  enc: ", ctx->enckey, ctx->enckeylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) 	flow_dump("  auth: ", ctx->authkey, ctx->authkeylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) 	/* setkey the fallback just in case we need to use it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) 	if (ctx->fallback_cipher) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) 		flow_log("  running fallback setkey()\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) 		ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) 		ctx->fallback_cipher->base.crt_flags |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) 		    tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) 		ret = crypto_aead_setkey(ctx->fallback_cipher, key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) 					 keylen + ctx->salt_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) 			flow_log("  fallback setkey() returned:%d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) 	ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) 							  ctx->enckeylen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) 							  false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) 	atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_AEAD]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) 	flow_log("  enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) 		 ctx->authkeylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) badkey:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) 	ctx->enckeylen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) 	ctx->authkeylen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) 	ctx->digestsize = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) 	return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925)  * aead_gcm_esp_setkey() - setkey() operation for ESP variant of GCM AES.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926)  * @cipher: AEAD structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927)  * @key:    Key followed by 4 bytes of salt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928)  * @keylen: Length of key plus salt, in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930)  * Extracts salt from key and stores it to be prepended to IV on each request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931)  * Digest is always 16 bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933)  * Return: Value from generic gcm setkey.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) static int aead_gcm_esp_setkey(struct crypto_aead *cipher,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) 			       const u8 *key, unsigned int keylen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) 	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) 	flow_log("%s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) 	if (keylen < GCM_ESP_SALT_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) 	ctx->salt_len = GCM_ESP_SALT_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) 	ctx->salt_offset = GCM_ESP_SALT_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) 	memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) 	keylen -= GCM_ESP_SALT_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) 	ctx->digestsize = GCM_ESP_DIGESTSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) 	ctx->is_esp = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) 	flow_dump("salt: ", ctx->salt, GCM_ESP_SALT_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) 	return aead_gcm_ccm_setkey(cipher, key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957)  * rfc4543_gcm_esp_setkey() - setkey operation for RFC4543 variant of GCM/GMAC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958)  * cipher: AEAD structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959)  * key:    Key followed by 4 bytes of salt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960)  * keylen: Length of key plus salt, in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962)  * Extracts salt from key and stores it to be prepended to IV on each request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963)  * Digest is always 16 bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965)  * Return: Value from generic gcm setkey.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) static int rfc4543_gcm_esp_setkey(struct crypto_aead *cipher,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) 				  const u8 *key, unsigned int keylen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) 	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) 	flow_log("%s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) 	if (keylen < GCM_ESP_SALT_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) 	ctx->salt_len = GCM_ESP_SALT_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) 	ctx->salt_offset = GCM_ESP_SALT_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) 	memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) 	keylen -= GCM_ESP_SALT_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) 	ctx->digestsize = GCM_ESP_DIGESTSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) 	ctx->is_esp = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) 	ctx->is_rfc4543 = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) 	flow_dump("salt: ", ctx->salt, GCM_ESP_SALT_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) 	return aead_gcm_ccm_setkey(cipher, key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990)  * aead_ccm_esp_setkey() - setkey() operation for ESP variant of CCM AES.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991)  * @cipher: AEAD structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992)  * @key:    Key followed by 4 bytes of salt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993)  * @keylen: Length of key plus salt, in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995)  * Extracts salt from key and stores it to be prepended to IV on each request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996)  * Digest is always 16 bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998)  * Return: Value from generic ccm setkey.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) static int aead_ccm_esp_setkey(struct crypto_aead *cipher,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) 			       const u8 *key, unsigned int keylen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) 	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) 	flow_log("%s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) 	if (keylen < CCM_ESP_SALT_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) 	ctx->salt_len = CCM_ESP_SALT_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) 	ctx->salt_offset = CCM_ESP_SALT_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) 	memcpy(ctx->salt, key + keylen - CCM_ESP_SALT_SIZE, CCM_ESP_SALT_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) 	keylen -= CCM_ESP_SALT_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) 	ctx->is_esp = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) 	flow_dump("salt: ", ctx->salt, CCM_ESP_SALT_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) 	return aead_gcm_ccm_setkey(cipher, key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) static int aead_setauthsize(struct crypto_aead *cipher, unsigned int authsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) 	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) 	flow_log("%s() authkeylen:%u authsize:%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) 		 __func__, ctx->authkeylen, authsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) 	ctx->digestsize = authsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) 	/* setkey the fallback just in case we needto use it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) 	if (ctx->fallback_cipher) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) 		flow_log("  running fallback setauth()\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) 		ret = crypto_aead_setauthsize(ctx->fallback_cipher, authsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) 			flow_log("  fallback setauth() returned:%d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) static int aead_encrypt(struct aead_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) 	flow_log("%s() cryptlen:%u %08x\n", __func__, req->cryptlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) 		 req->cryptlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) 	dump_sg(req->src, 0, req->cryptlen + req->assoclen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) 	flow_log("  assoc_len:%u\n", req->assoclen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) 	return aead_enqueue(req, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) static int aead_decrypt(struct aead_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) 	flow_log("%s() cryptlen:%u\n", __func__, req->cryptlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) 	dump_sg(req->src, 0, req->cryptlen + req->assoclen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) 	flow_log("  assoc_len:%u\n", req->assoclen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) 	return aead_enqueue(req, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) /* ==================== Supported Cipher Algorithms ==================== */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) static struct iproc_alg_s driver_algs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) 	 .type = CRYPTO_ALG_TYPE_AEAD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) 	 .alg.aead = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) 		 .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) 			.cra_name = "gcm(aes)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) 			.cra_driver_name = "gcm-aes-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) 			.cra_blocksize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) 		 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) 		 .setkey = aead_gcm_ccm_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) 		 .ivsize = GCM_AES_IV_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) 		.maxauthsize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) 			 .alg = CIPHER_ALG_AES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) 			 .mode = CIPHER_MODE_GCM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) 		       .alg = HASH_ALG_AES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) 		       .mode = HASH_MODE_GCM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) 	 .auth_first = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) 	 .type = CRYPTO_ALG_TYPE_AEAD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) 	 .alg.aead = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) 		 .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) 			.cra_name = "ccm(aes)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) 			.cra_driver_name = "ccm-aes-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) 			.cra_blocksize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) 		 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) 		 .setkey = aead_gcm_ccm_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) 		 .ivsize = CCM_AES_IV_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) 		.maxauthsize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) 			 .alg = CIPHER_ALG_AES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) 			 .mode = CIPHER_MODE_CCM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) 		       .alg = HASH_ALG_AES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) 		       .mode = HASH_MODE_CCM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) 	 .auth_first = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) 	 .type = CRYPTO_ALG_TYPE_AEAD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) 	 .alg.aead = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) 		 .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) 			.cra_name = "rfc4106(gcm(aes))",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) 			.cra_driver_name = "gcm-aes-esp-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) 			.cra_blocksize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) 		 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) 		 .setkey = aead_gcm_esp_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) 		 .ivsize = GCM_RFC4106_IV_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) 		 .maxauthsize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) 			 .alg = CIPHER_ALG_AES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) 			 .mode = CIPHER_MODE_GCM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) 		       .alg = HASH_ALG_AES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) 		       .mode = HASH_MODE_GCM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) 	 .auth_first = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) 	 .type = CRYPTO_ALG_TYPE_AEAD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) 	 .alg.aead = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) 		 .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) 			.cra_name = "rfc4309(ccm(aes))",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) 			.cra_driver_name = "ccm-aes-esp-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) 			.cra_blocksize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) 		 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) 		 .setkey = aead_ccm_esp_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) 		 .ivsize = CCM_AES_IV_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) 		 .maxauthsize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) 			 .alg = CIPHER_ALG_AES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) 			 .mode = CIPHER_MODE_CCM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) 		       .alg = HASH_ALG_AES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) 		       .mode = HASH_MODE_CCM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) 	 .auth_first = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) 	 .type = CRYPTO_ALG_TYPE_AEAD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) 	 .alg.aead = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) 		 .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) 			.cra_name = "rfc4543(gcm(aes))",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) 			.cra_driver_name = "gmac-aes-esp-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) 			.cra_blocksize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) 		 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) 		 .setkey = rfc4543_gcm_esp_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) 		 .ivsize = GCM_RFC4106_IV_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) 		 .maxauthsize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) 			 .alg = CIPHER_ALG_AES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) 			 .mode = CIPHER_MODE_GCM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) 		       .alg = HASH_ALG_AES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) 		       .mode = HASH_MODE_GCM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) 	 .auth_first = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) 	 .type = CRYPTO_ALG_TYPE_AEAD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) 	 .alg.aead = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) 		 .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) 			.cra_name = "authenc(hmac(md5),cbc(aes))",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) 			.cra_driver_name = "authenc-hmac-md5-cbc-aes-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) 			.cra_blocksize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) 				     CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) 				     CRYPTO_ALG_ALLOCATES_MEMORY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) 		 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) 		 .setkey = aead_authenc_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) 		.ivsize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) 		.maxauthsize = MD5_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) 			 .alg = CIPHER_ALG_AES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) 			 .mode = CIPHER_MODE_CBC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) 		       .alg = HASH_ALG_MD5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) 		       .mode = HASH_MODE_HMAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) 	 .auth_first = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) 	 .type = CRYPTO_ALG_TYPE_AEAD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) 	 .alg.aead = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) 		 .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) 			.cra_name = "authenc(hmac(sha1),cbc(aes))",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) 			.cra_driver_name = "authenc-hmac-sha1-cbc-aes-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) 			.cra_blocksize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) 				     CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) 				     CRYPTO_ALG_ALLOCATES_MEMORY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) 		 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) 		 .setkey = aead_authenc_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) 		 .ivsize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) 		 .maxauthsize = SHA1_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) 			 .alg = CIPHER_ALG_AES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) 			 .mode = CIPHER_MODE_CBC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) 		       .alg = HASH_ALG_SHA1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) 		       .mode = HASH_MODE_HMAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) 	 .auth_first = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) 	 .type = CRYPTO_ALG_TYPE_AEAD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) 	 .alg.aead = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) 		 .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) 			.cra_name = "authenc(hmac(sha256),cbc(aes))",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) 			.cra_driver_name = "authenc-hmac-sha256-cbc-aes-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) 			.cra_blocksize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) 				     CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) 				     CRYPTO_ALG_ALLOCATES_MEMORY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) 		 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) 		 .setkey = aead_authenc_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) 		 .ivsize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) 		 .maxauthsize = SHA256_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) 			 .alg = CIPHER_ALG_AES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) 			 .mode = CIPHER_MODE_CBC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) 		       .alg = HASH_ALG_SHA256,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) 		       .mode = HASH_MODE_HMAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) 	 .auth_first = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) 	 .type = CRYPTO_ALG_TYPE_AEAD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) 	 .alg.aead = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) 		 .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) 			.cra_name = "authenc(hmac(md5),cbc(des))",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) 			.cra_driver_name = "authenc-hmac-md5-cbc-des-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) 			.cra_blocksize = DES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) 				     CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) 				     CRYPTO_ALG_ALLOCATES_MEMORY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) 		 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) 		 .setkey = aead_authenc_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) 		 .ivsize = DES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) 		 .maxauthsize = MD5_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) 			 .alg = CIPHER_ALG_DES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) 			 .mode = CIPHER_MODE_CBC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) 		       .alg = HASH_ALG_MD5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) 		       .mode = HASH_MODE_HMAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) 	 .auth_first = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) 	 .type = CRYPTO_ALG_TYPE_AEAD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) 	 .alg.aead = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) 		 .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) 			.cra_name = "authenc(hmac(sha1),cbc(des))",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) 			.cra_driver_name = "authenc-hmac-sha1-cbc-des-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) 			.cra_blocksize = DES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) 				     CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) 				     CRYPTO_ALG_ALLOCATES_MEMORY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) 		 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) 		 .setkey = aead_authenc_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) 		 .ivsize = DES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) 		 .maxauthsize = SHA1_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) 			 .alg = CIPHER_ALG_DES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) 			 .mode = CIPHER_MODE_CBC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) 		       .alg = HASH_ALG_SHA1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) 		       .mode = HASH_MODE_HMAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) 	 .auth_first = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) 	 .type = CRYPTO_ALG_TYPE_AEAD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) 	 .alg.aead = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) 		 .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) 			.cra_name = "authenc(hmac(sha224),cbc(des))",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) 			.cra_driver_name = "authenc-hmac-sha224-cbc-des-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) 			.cra_blocksize = DES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) 				     CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) 				     CRYPTO_ALG_ALLOCATES_MEMORY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) 		 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) 		 .setkey = aead_authenc_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) 		 .ivsize = DES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) 		 .maxauthsize = SHA224_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) 			 .alg = CIPHER_ALG_DES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) 			 .mode = CIPHER_MODE_CBC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) 		       .alg = HASH_ALG_SHA224,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) 		       .mode = HASH_MODE_HMAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) 	 .auth_first = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) 	 .type = CRYPTO_ALG_TYPE_AEAD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) 	 .alg.aead = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) 		 .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) 			.cra_name = "authenc(hmac(sha256),cbc(des))",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) 			.cra_driver_name = "authenc-hmac-sha256-cbc-des-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) 			.cra_blocksize = DES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) 				     CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) 				     CRYPTO_ALG_ALLOCATES_MEMORY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) 		 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) 		 .setkey = aead_authenc_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) 		 .ivsize = DES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) 		 .maxauthsize = SHA256_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) 			 .alg = CIPHER_ALG_DES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) 			 .mode = CIPHER_MODE_CBC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) 		       .alg = HASH_ALG_SHA256,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) 		       .mode = HASH_MODE_HMAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) 	 .auth_first = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) 	 .type = CRYPTO_ALG_TYPE_AEAD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) 	 .alg.aead = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) 		 .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) 			.cra_name = "authenc(hmac(sha384),cbc(des))",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) 			.cra_driver_name = "authenc-hmac-sha384-cbc-des-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) 			.cra_blocksize = DES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) 				     CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) 				     CRYPTO_ALG_ALLOCATES_MEMORY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) 		 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) 		 .setkey = aead_authenc_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) 		 .ivsize = DES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) 		 .maxauthsize = SHA384_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) 			 .alg = CIPHER_ALG_DES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) 			 .mode = CIPHER_MODE_CBC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) 		       .alg = HASH_ALG_SHA384,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) 		       .mode = HASH_MODE_HMAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) 	 .auth_first = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) 	 .type = CRYPTO_ALG_TYPE_AEAD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) 	 .alg.aead = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) 		 .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) 			.cra_name = "authenc(hmac(sha512),cbc(des))",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) 			.cra_driver_name = "authenc-hmac-sha512-cbc-des-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) 			.cra_blocksize = DES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) 				     CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) 				     CRYPTO_ALG_ALLOCATES_MEMORY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) 		 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) 		 .setkey = aead_authenc_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) 		 .ivsize = DES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) 		 .maxauthsize = SHA512_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) 			 .alg = CIPHER_ALG_DES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) 			 .mode = CIPHER_MODE_CBC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) 		       .alg = HASH_ALG_SHA512,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) 		       .mode = HASH_MODE_HMAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) 	 .auth_first = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) 	 .type = CRYPTO_ALG_TYPE_AEAD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) 	 .alg.aead = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) 		 .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) 			.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) 			.cra_driver_name = "authenc-hmac-md5-cbc-des3-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) 				     CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) 				     CRYPTO_ALG_ALLOCATES_MEMORY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) 		 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) 		 .setkey = aead_authenc_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) 		 .ivsize = DES3_EDE_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) 		 .maxauthsize = MD5_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) 			 .alg = CIPHER_ALG_3DES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) 			 .mode = CIPHER_MODE_CBC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) 		       .alg = HASH_ALG_MD5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) 		       .mode = HASH_MODE_HMAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) 	 .auth_first = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) 	 .type = CRYPTO_ALG_TYPE_AEAD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) 	 .alg.aead = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) 		 .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) 			.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) 			.cra_driver_name = "authenc-hmac-sha1-cbc-des3-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) 				     CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) 				     CRYPTO_ALG_ALLOCATES_MEMORY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) 		 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) 		 .setkey = aead_authenc_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) 		 .ivsize = DES3_EDE_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) 		 .maxauthsize = SHA1_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) 			 .alg = CIPHER_ALG_3DES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) 			 .mode = CIPHER_MODE_CBC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) 		       .alg = HASH_ALG_SHA1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) 		       .mode = HASH_MODE_HMAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) 	 .auth_first = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) 	 .type = CRYPTO_ALG_TYPE_AEAD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) 	 .alg.aead = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) 		 .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) 			.cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) 			.cra_driver_name = "authenc-hmac-sha224-cbc-des3-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) 				     CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) 				     CRYPTO_ALG_ALLOCATES_MEMORY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) 		 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) 		 .setkey = aead_authenc_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) 		 .ivsize = DES3_EDE_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) 		 .maxauthsize = SHA224_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) 			 .alg = CIPHER_ALG_3DES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) 			 .mode = CIPHER_MODE_CBC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) 		       .alg = HASH_ALG_SHA224,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) 		       .mode = HASH_MODE_HMAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) 	 .auth_first = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) 	 .type = CRYPTO_ALG_TYPE_AEAD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) 	 .alg.aead = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) 		 .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) 			.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) 			.cra_driver_name = "authenc-hmac-sha256-cbc-des3-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) 				     CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) 				     CRYPTO_ALG_ALLOCATES_MEMORY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) 		 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) 		 .setkey = aead_authenc_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) 		 .ivsize = DES3_EDE_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) 		 .maxauthsize = SHA256_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) 			 .alg = CIPHER_ALG_3DES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) 			 .mode = CIPHER_MODE_CBC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) 		       .alg = HASH_ALG_SHA256,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) 		       .mode = HASH_MODE_HMAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) 	 .auth_first = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) 	 .type = CRYPTO_ALG_TYPE_AEAD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) 	 .alg.aead = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) 		 .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) 			.cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) 			.cra_driver_name = "authenc-hmac-sha384-cbc-des3-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) 				     CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) 				     CRYPTO_ALG_ALLOCATES_MEMORY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) 		 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) 		 .setkey = aead_authenc_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) 		 .ivsize = DES3_EDE_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) 		 .maxauthsize = SHA384_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) 			 .alg = CIPHER_ALG_3DES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) 			 .mode = CIPHER_MODE_CBC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) 		       .alg = HASH_ALG_SHA384,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) 		       .mode = HASH_MODE_HMAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) 	 .auth_first = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) 	 .type = CRYPTO_ALG_TYPE_AEAD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) 	 .alg.aead = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) 		 .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) 			.cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) 			.cra_driver_name = "authenc-hmac-sha512-cbc-des3-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) 				     CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) 				     CRYPTO_ALG_ALLOCATES_MEMORY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) 		 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) 		 .setkey = aead_authenc_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) 		 .ivsize = DES3_EDE_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) 		 .maxauthsize = SHA512_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) 			 .alg = CIPHER_ALG_3DES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) 			 .mode = CIPHER_MODE_CBC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) 		       .alg = HASH_ALG_SHA512,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) 		       .mode = HASH_MODE_HMAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) 	 .auth_first = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) /* SKCIPHER algorithms. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) 	 .type = CRYPTO_ALG_TYPE_SKCIPHER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) 	 .alg.skcipher = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) 			.base.cra_name = "ofb(des)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) 			.base.cra_driver_name = "ofb-des-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) 			.base.cra_blocksize = DES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) 			.min_keysize = DES_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) 			.max_keysize = DES_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) 			.ivsize = DES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) 			},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) 			 .alg = CIPHER_ALG_DES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) 			 .mode = CIPHER_MODE_OFB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) 		       .alg = HASH_ALG_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) 		       .mode = HASH_MODE_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) 	 .type = CRYPTO_ALG_TYPE_SKCIPHER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) 	 .alg.skcipher = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) 			.base.cra_name = "cbc(des)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) 			.base.cra_driver_name = "cbc-des-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) 			.base.cra_blocksize = DES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) 			.min_keysize = DES_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) 			.max_keysize = DES_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) 			.ivsize = DES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) 			},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) 			 .alg = CIPHER_ALG_DES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) 			 .mode = CIPHER_MODE_CBC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) 		       .alg = HASH_ALG_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) 		       .mode = HASH_MODE_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) 	 .type = CRYPTO_ALG_TYPE_SKCIPHER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) 	 .alg.skcipher = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) 			.base.cra_name = "ecb(des)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) 			.base.cra_driver_name = "ecb-des-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) 			.base.cra_blocksize = DES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) 			.min_keysize = DES_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) 			.max_keysize = DES_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) 			.ivsize = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) 			},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) 			 .alg = CIPHER_ALG_DES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) 			 .mode = CIPHER_MODE_ECB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) 		       .alg = HASH_ALG_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) 		       .mode = HASH_MODE_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) 	 .type = CRYPTO_ALG_TYPE_SKCIPHER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) 	 .alg.skcipher = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) 			.base.cra_name = "ofb(des3_ede)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) 			.base.cra_driver_name = "ofb-des3-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) 			.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) 			.min_keysize = DES3_EDE_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) 			.max_keysize = DES3_EDE_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) 			.ivsize = DES3_EDE_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) 			},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) 			 .alg = CIPHER_ALG_3DES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) 			 .mode = CIPHER_MODE_OFB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) 		       .alg = HASH_ALG_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) 		       .mode = HASH_MODE_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) 	 .type = CRYPTO_ALG_TYPE_SKCIPHER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) 	 .alg.skcipher = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) 			.base.cra_name = "cbc(des3_ede)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) 			.base.cra_driver_name = "cbc-des3-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) 			.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) 			.min_keysize = DES3_EDE_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) 			.max_keysize = DES3_EDE_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) 			.ivsize = DES3_EDE_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) 			},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) 			 .alg = CIPHER_ALG_3DES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) 			 .mode = CIPHER_MODE_CBC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) 		       .alg = HASH_ALG_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) 		       .mode = HASH_MODE_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) 	 .type = CRYPTO_ALG_TYPE_SKCIPHER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) 	 .alg.skcipher = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) 			.base.cra_name = "ecb(des3_ede)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) 			.base.cra_driver_name = "ecb-des3-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) 			.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) 			.min_keysize = DES3_EDE_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) 			.max_keysize = DES3_EDE_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) 			.ivsize = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) 			},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) 			 .alg = CIPHER_ALG_3DES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) 			 .mode = CIPHER_MODE_ECB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) 		       .alg = HASH_ALG_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) 		       .mode = HASH_MODE_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) 	 .type = CRYPTO_ALG_TYPE_SKCIPHER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) 	 .alg.skcipher = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) 			.base.cra_name = "ofb(aes)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) 			.base.cra_driver_name = "ofb-aes-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) 			.base.cra_blocksize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) 			.min_keysize = AES_MIN_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) 			.max_keysize = AES_MAX_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) 			.ivsize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) 			},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) 			 .alg = CIPHER_ALG_AES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) 			 .mode = CIPHER_MODE_OFB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) 		       .alg = HASH_ALG_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) 		       .mode = HASH_MODE_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) 	 .type = CRYPTO_ALG_TYPE_SKCIPHER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) 	 .alg.skcipher = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) 			.base.cra_name = "cbc(aes)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) 			.base.cra_driver_name = "cbc-aes-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) 			.base.cra_blocksize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) 			.min_keysize = AES_MIN_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) 			.max_keysize = AES_MAX_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) 			.ivsize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) 			},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) 			 .alg = CIPHER_ALG_AES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) 			 .mode = CIPHER_MODE_CBC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) 		       .alg = HASH_ALG_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) 		       .mode = HASH_MODE_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) 	 .type = CRYPTO_ALG_TYPE_SKCIPHER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) 	 .alg.skcipher = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) 			.base.cra_name = "ecb(aes)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) 			.base.cra_driver_name = "ecb-aes-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) 			.base.cra_blocksize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) 			.min_keysize = AES_MIN_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) 			.max_keysize = AES_MAX_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) 			.ivsize = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) 			},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) 			 .alg = CIPHER_ALG_AES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) 			 .mode = CIPHER_MODE_ECB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) 		       .alg = HASH_ALG_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) 		       .mode = HASH_MODE_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) 	 .type = CRYPTO_ALG_TYPE_SKCIPHER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) 	 .alg.skcipher = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) 			.base.cra_name = "ctr(aes)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) 			.base.cra_driver_name = "ctr-aes-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) 			.base.cra_blocksize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) 			.min_keysize = AES_MIN_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) 			.max_keysize = AES_MAX_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) 			.ivsize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) 			},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) 			 .alg = CIPHER_ALG_AES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) 			 .mode = CIPHER_MODE_CTR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) 		       .alg = HASH_ALG_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) 		       .mode = HASH_MODE_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) 	 .type = CRYPTO_ALG_TYPE_SKCIPHER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) 	 .alg.skcipher = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) 			.base.cra_name = "xts(aes)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) 			.base.cra_driver_name = "xts-aes-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) 			.base.cra_blocksize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) 			.min_keysize = 2 * AES_MIN_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) 			.max_keysize = 2 * AES_MAX_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) 			.ivsize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) 			},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) 			 .alg = CIPHER_ALG_AES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) 			 .mode = CIPHER_MODE_XTS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) 		       .alg = HASH_ALG_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) 		       .mode = HASH_MODE_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) /* AHASH algorithms. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) 	 .type = CRYPTO_ALG_TYPE_AHASH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) 	 .alg.hash = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) 		      .halg.digestsize = MD5_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) 		      .halg.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) 				    .cra_name = "md5",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) 				    .cra_driver_name = "md5-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) 				    .cra_blocksize = MD5_BLOCK_WORDS * 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) 				    .cra_flags = CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) 						 CRYPTO_ALG_ALLOCATES_MEMORY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) 		      },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) 			 .alg = CIPHER_ALG_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) 			 .mode = CIPHER_MODE_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) 		       .alg = HASH_ALG_MD5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) 		       .mode = HASH_MODE_HASH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) 	 .type = CRYPTO_ALG_TYPE_AHASH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) 	 .alg.hash = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) 		      .halg.digestsize = MD5_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) 		      .halg.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) 				    .cra_name = "hmac(md5)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) 				    .cra_driver_name = "hmac-md5-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) 				    .cra_blocksize = MD5_BLOCK_WORDS * 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) 		      },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) 			 .alg = CIPHER_ALG_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) 			 .mode = CIPHER_MODE_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) 		       .alg = HASH_ALG_MD5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) 		       .mode = HASH_MODE_HMAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) 	{.type = CRYPTO_ALG_TYPE_AHASH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) 	 .alg.hash = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) 		      .halg.digestsize = SHA1_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) 		      .halg.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) 				    .cra_name = "sha1",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) 				    .cra_driver_name = "sha1-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) 				    .cra_blocksize = SHA1_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) 		      },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) 			 .alg = CIPHER_ALG_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) 			 .mode = CIPHER_MODE_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) 		       .alg = HASH_ALG_SHA1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) 		       .mode = HASH_MODE_HASH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) 	{.type = CRYPTO_ALG_TYPE_AHASH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) 	 .alg.hash = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) 		      .halg.digestsize = SHA1_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) 		      .halg.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) 				    .cra_name = "hmac(sha1)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) 				    .cra_driver_name = "hmac-sha1-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) 				    .cra_blocksize = SHA1_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) 		      },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) 			 .alg = CIPHER_ALG_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) 			 .mode = CIPHER_MODE_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) 		       .alg = HASH_ALG_SHA1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) 		       .mode = HASH_MODE_HMAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) 	{.type = CRYPTO_ALG_TYPE_AHASH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) 	 .alg.hash = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) 			.halg.digestsize = SHA224_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) 			.halg.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) 				    .cra_name = "sha224",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) 				    .cra_driver_name = "sha224-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) 				    .cra_blocksize = SHA224_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) 		      },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) 			 .alg = CIPHER_ALG_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) 			 .mode = CIPHER_MODE_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) 		       .alg = HASH_ALG_SHA224,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) 		       .mode = HASH_MODE_HASH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) 	{.type = CRYPTO_ALG_TYPE_AHASH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) 	 .alg.hash = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) 		      .halg.digestsize = SHA224_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) 		      .halg.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) 				    .cra_name = "hmac(sha224)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) 				    .cra_driver_name = "hmac-sha224-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) 				    .cra_blocksize = SHA224_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) 		      },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) 			 .alg = CIPHER_ALG_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) 			 .mode = CIPHER_MODE_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) 		       .alg = HASH_ALG_SHA224,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) 		       .mode = HASH_MODE_HMAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) 	{.type = CRYPTO_ALG_TYPE_AHASH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) 	 .alg.hash = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) 		      .halg.digestsize = SHA256_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) 		      .halg.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) 				    .cra_name = "sha256",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) 				    .cra_driver_name = "sha256-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) 				    .cra_blocksize = SHA256_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) 		      },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) 			 .alg = CIPHER_ALG_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) 			 .mode = CIPHER_MODE_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) 		       .alg = HASH_ALG_SHA256,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) 		       .mode = HASH_MODE_HASH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) 	{.type = CRYPTO_ALG_TYPE_AHASH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) 	 .alg.hash = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) 		      .halg.digestsize = SHA256_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) 		      .halg.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) 				    .cra_name = "hmac(sha256)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) 				    .cra_driver_name = "hmac-sha256-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) 				    .cra_blocksize = SHA256_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) 		      },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) 			 .alg = CIPHER_ALG_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) 			 .mode = CIPHER_MODE_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) 		       .alg = HASH_ALG_SHA256,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) 		       .mode = HASH_MODE_HMAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) 	.type = CRYPTO_ALG_TYPE_AHASH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) 	 .alg.hash = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) 		      .halg.digestsize = SHA384_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) 		      .halg.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) 				    .cra_name = "sha384",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) 				    .cra_driver_name = "sha384-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) 				    .cra_blocksize = SHA384_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) 		      },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) 			 .alg = CIPHER_ALG_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) 			 .mode = CIPHER_MODE_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) 		       .alg = HASH_ALG_SHA384,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) 		       .mode = HASH_MODE_HASH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) 	 .type = CRYPTO_ALG_TYPE_AHASH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) 	 .alg.hash = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) 		      .halg.digestsize = SHA384_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) 		      .halg.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) 				    .cra_name = "hmac(sha384)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) 				    .cra_driver_name = "hmac-sha384-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) 				    .cra_blocksize = SHA384_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) 		      },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) 			 .alg = CIPHER_ALG_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) 			 .mode = CIPHER_MODE_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) 		       .alg = HASH_ALG_SHA384,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) 		       .mode = HASH_MODE_HMAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) 	 .type = CRYPTO_ALG_TYPE_AHASH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) 	 .alg.hash = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) 		      .halg.digestsize = SHA512_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) 		      .halg.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) 				    .cra_name = "sha512",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) 				    .cra_driver_name = "sha512-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) 				    .cra_blocksize = SHA512_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) 		      },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) 			 .alg = CIPHER_ALG_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) 			 .mode = CIPHER_MODE_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) 		       .alg = HASH_ALG_SHA512,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) 		       .mode = HASH_MODE_HASH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) 	 .type = CRYPTO_ALG_TYPE_AHASH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) 	 .alg.hash = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) 		      .halg.digestsize = SHA512_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) 		      .halg.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) 				    .cra_name = "hmac(sha512)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) 				    .cra_driver_name = "hmac-sha512-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) 				    .cra_blocksize = SHA512_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) 		      },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) 			 .alg = CIPHER_ALG_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) 			 .mode = CIPHER_MODE_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) 		       .alg = HASH_ALG_SHA512,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) 		       .mode = HASH_MODE_HMAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) 	 .type = CRYPTO_ALG_TYPE_AHASH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) 	 .alg.hash = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) 		      .halg.digestsize = SHA3_224_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) 		      .halg.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) 				    .cra_name = "sha3-224",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) 				    .cra_driver_name = "sha3-224-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) 				    .cra_blocksize = SHA3_224_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) 		      },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) 			 .alg = CIPHER_ALG_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) 			 .mode = CIPHER_MODE_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) 		       .alg = HASH_ALG_SHA3_224,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) 		       .mode = HASH_MODE_HASH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) 	 .type = CRYPTO_ALG_TYPE_AHASH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) 	 .alg.hash = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) 		      .halg.digestsize = SHA3_224_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) 		      .halg.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) 				    .cra_name = "hmac(sha3-224)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) 				    .cra_driver_name = "hmac-sha3-224-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) 				    .cra_blocksize = SHA3_224_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) 		      },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) 			 .alg = CIPHER_ALG_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) 			 .mode = CIPHER_MODE_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) 		       .alg = HASH_ALG_SHA3_224,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) 		       .mode = HASH_MODE_HMAC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) 	 .type = CRYPTO_ALG_TYPE_AHASH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) 	 .alg.hash = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) 		      .halg.digestsize = SHA3_256_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) 		      .halg.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) 				    .cra_name = "sha3-256",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) 				    .cra_driver_name = "sha3-256-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) 				    .cra_blocksize = SHA3_256_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) 		      },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) 			 .alg = CIPHER_ALG_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) 			 .mode = CIPHER_MODE_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) 		       .alg = HASH_ALG_SHA3_256,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) 		       .mode = HASH_MODE_HASH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) 	 .type = CRYPTO_ALG_TYPE_AHASH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) 	 .alg.hash = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) 		      .halg.digestsize = SHA3_256_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) 		      .halg.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) 				    .cra_name = "hmac(sha3-256)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) 				    .cra_driver_name = "hmac-sha3-256-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) 				    .cra_blocksize = SHA3_256_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) 		      },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) 			 .alg = CIPHER_ALG_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) 			 .mode = CIPHER_MODE_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) 		       .alg = HASH_ALG_SHA3_256,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) 		       .mode = HASH_MODE_HMAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) 	 .type = CRYPTO_ALG_TYPE_AHASH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) 	 .alg.hash = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) 		      .halg.digestsize = SHA3_384_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) 		      .halg.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) 				    .cra_name = "sha3-384",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) 				    .cra_driver_name = "sha3-384-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) 				    .cra_blocksize = SHA3_224_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) 		      },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) 			 .alg = CIPHER_ALG_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) 			 .mode = CIPHER_MODE_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) 		       .alg = HASH_ALG_SHA3_384,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) 		       .mode = HASH_MODE_HASH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) 	 .type = CRYPTO_ALG_TYPE_AHASH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) 	 .alg.hash = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) 		      .halg.digestsize = SHA3_384_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) 		      .halg.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) 				    .cra_name = "hmac(sha3-384)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) 				    .cra_driver_name = "hmac-sha3-384-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) 				    .cra_blocksize = SHA3_384_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) 		      },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) 			 .alg = CIPHER_ALG_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) 			 .mode = CIPHER_MODE_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) 		       .alg = HASH_ALG_SHA3_384,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) 		       .mode = HASH_MODE_HMAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) 	 .type = CRYPTO_ALG_TYPE_AHASH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) 	 .alg.hash = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) 		      .halg.digestsize = SHA3_512_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) 		      .halg.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) 				    .cra_name = "sha3-512",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) 				    .cra_driver_name = "sha3-512-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) 				    .cra_blocksize = SHA3_512_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) 		      },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) 			 .alg = CIPHER_ALG_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) 			 .mode = CIPHER_MODE_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) 		       .alg = HASH_ALG_SHA3_512,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) 		       .mode = HASH_MODE_HASH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) 	 .type = CRYPTO_ALG_TYPE_AHASH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) 	 .alg.hash = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) 		      .halg.digestsize = SHA3_512_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) 		      .halg.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) 				    .cra_name = "hmac(sha3-512)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) 				    .cra_driver_name = "hmac-sha3-512-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) 				    .cra_blocksize = SHA3_512_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) 		      },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) 			 .alg = CIPHER_ALG_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) 			 .mode = CIPHER_MODE_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) 		       .alg = HASH_ALG_SHA3_512,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) 		       .mode = HASH_MODE_HMAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) 	 .type = CRYPTO_ALG_TYPE_AHASH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) 	 .alg.hash = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) 		      .halg.digestsize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) 		      .halg.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) 				    .cra_name = "xcbc(aes)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) 				    .cra_driver_name = "xcbc-aes-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) 				    .cra_blocksize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) 		      },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) 			 .alg = CIPHER_ALG_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) 			 .mode = CIPHER_MODE_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) 		       .alg = HASH_ALG_AES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) 		       .mode = HASH_MODE_XCBC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) 	 .type = CRYPTO_ALG_TYPE_AHASH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) 	 .alg.hash = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) 		      .halg.digestsize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) 		      .halg.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) 				    .cra_name = "cmac(aes)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) 				    .cra_driver_name = "cmac-aes-iproc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) 				    .cra_blocksize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) 		      },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) 	 .cipher_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) 			 .alg = CIPHER_ALG_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) 			 .mode = CIPHER_MODE_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) 			 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) 	 .auth_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) 		       .alg = HASH_ALG_AES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) 		       .mode = HASH_MODE_CMAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) 		       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) 	 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) static int generic_cra_init(struct crypto_tfm *tfm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) 			    struct iproc_alg_s *cipher_alg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) 	struct spu_hw *spu = &iproc_priv.spu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) 	struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) 	unsigned int blocksize = crypto_tfm_alg_blocksize(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) 	flow_log("%s()\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) 	ctx->alg = cipher_alg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) 	ctx->cipher = cipher_alg->cipher_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) 	ctx->auth = cipher_alg->auth_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) 	ctx->auth_first = cipher_alg->auth_first;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) 	ctx->max_payload = spu->spu_ctx_max_payload(ctx->cipher.alg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) 						    ctx->cipher.mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) 						    blocksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) 	ctx->fallback_cipher = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) 	ctx->enckeylen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) 	ctx->authkeylen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) 	atomic_inc(&iproc_priv.stream_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) 	atomic_inc(&iproc_priv.session_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) static int skcipher_init_tfm(struct crypto_skcipher *skcipher)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) 	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) 	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) 	struct iproc_alg_s *cipher_alg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) 	flow_log("%s()\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) 	crypto_skcipher_set_reqsize(skcipher, sizeof(struct iproc_reqctx_s));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) 	cipher_alg = container_of(alg, struct iproc_alg_s, alg.skcipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) 	return generic_cra_init(tfm, cipher_alg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) static int ahash_cra_init(struct crypto_tfm *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) 	struct crypto_alg *alg = tfm->__crt_alg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) 	struct iproc_alg_s *cipher_alg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) 	cipher_alg = container_of(__crypto_ahash_alg(alg), struct iproc_alg_s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) 				  alg.hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) 	err = generic_cra_init(tfm, cipher_alg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) 	flow_log("%s()\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) 	 * export state size has to be < 512 bytes. So don't include msg bufs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) 	 * in state size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) 				 sizeof(struct iproc_reqctx_s));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) static int aead_cra_init(struct crypto_aead *aead)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) 	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) 	struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) 	struct crypto_alg *alg = tfm->__crt_alg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) 	struct aead_alg *aalg = container_of(alg, struct aead_alg, base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) 	struct iproc_alg_s *cipher_alg = container_of(aalg, struct iproc_alg_s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) 						      alg.aead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) 	int err = generic_cra_init(tfm, cipher_alg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) 	flow_log("%s()\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) 	crypto_aead_set_reqsize(aead, sizeof(struct iproc_reqctx_s));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) 	ctx->is_esp = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) 	ctx->salt_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) 	ctx->salt_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) 	/* random first IV */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) 	get_random_bytes(ctx->iv, MAX_IV_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) 	flow_dump("  iv: ", ctx->iv, MAX_IV_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) 	if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) 		if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) 			flow_log("%s() creating fallback cipher\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) 			ctx->fallback_cipher =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) 			    crypto_alloc_aead(alg->cra_name, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) 					      CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) 					      CRYPTO_ALG_NEED_FALLBACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) 			if (IS_ERR(ctx->fallback_cipher)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) 				pr_err("%s() Error: failed to allocate fallback for %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) 				       __func__, alg->cra_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) 				return PTR_ERR(ctx->fallback_cipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) static void generic_cra_exit(struct crypto_tfm *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) 	atomic_dec(&iproc_priv.session_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) static void skcipher_exit_tfm(struct crypto_skcipher *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) 	generic_cra_exit(crypto_skcipher_tfm(tfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) static void aead_cra_exit(struct crypto_aead *aead)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) 	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) 	struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) 	generic_cra_exit(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) 	if (ctx->fallback_cipher) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) 		crypto_free_aead(ctx->fallback_cipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) 		ctx->fallback_cipher = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311)  * spu_functions_register() - Specify hardware-specific SPU functions based on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312)  * SPU type read from device tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313)  * @dev:	device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314)  * @spu_type:	SPU hardware generation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315)  * @spu_subtype: SPU hardware version
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317) static void spu_functions_register(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) 				   enum spu_spu_type spu_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) 				   enum spu_spu_subtype spu_subtype)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) 	struct spu_hw *spu = &iproc_priv.spu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) 	if (spu_type == SPU_TYPE_SPUM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) 		dev_dbg(dev, "Registering SPUM functions");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) 		spu->spu_dump_msg_hdr = spum_dump_msg_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) 		spu->spu_payload_length = spum_payload_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) 		spu->spu_response_hdr_len = spum_response_hdr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328) 		spu->spu_hash_pad_len = spum_hash_pad_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) 		spu->spu_gcm_ccm_pad_len = spum_gcm_ccm_pad_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330) 		spu->spu_assoc_resp_len = spum_assoc_resp_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) 		spu->spu_aead_ivlen = spum_aead_ivlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) 		spu->spu_hash_type = spum_hash_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) 		spu->spu_digest_size = spum_digest_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) 		spu->spu_create_request = spum_create_request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335) 		spu->spu_cipher_req_init = spum_cipher_req_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) 		spu->spu_cipher_req_finish = spum_cipher_req_finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) 		spu->spu_request_pad = spum_request_pad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) 		spu->spu_tx_status_len = spum_tx_status_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) 		spu->spu_rx_status_len = spum_rx_status_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) 		spu->spu_status_process = spum_status_process;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) 		spu->spu_xts_tweak_in_payload = spum_xts_tweak_in_payload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) 		spu->spu_ccm_update_iv = spum_ccm_update_iv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) 		spu->spu_wordalign_padlen = spum_wordalign_padlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) 		if (spu_subtype == SPU_SUBTYPE_SPUM_NS2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) 			spu->spu_ctx_max_payload = spum_ns2_ctx_max_payload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) 			spu->spu_ctx_max_payload = spum_nsp_ctx_max_payload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) 		dev_dbg(dev, "Registering SPU2 functions");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350) 		spu->spu_dump_msg_hdr = spu2_dump_msg_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) 		spu->spu_ctx_max_payload = spu2_ctx_max_payload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) 		spu->spu_payload_length = spu2_payload_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) 		spu->spu_response_hdr_len = spu2_response_hdr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) 		spu->spu_hash_pad_len = spu2_hash_pad_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) 		spu->spu_gcm_ccm_pad_len = spu2_gcm_ccm_pad_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) 		spu->spu_assoc_resp_len = spu2_assoc_resp_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) 		spu->spu_aead_ivlen = spu2_aead_ivlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) 		spu->spu_hash_type = spu2_hash_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) 		spu->spu_digest_size = spu2_digest_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) 		spu->spu_create_request = spu2_create_request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) 		spu->spu_cipher_req_init = spu2_cipher_req_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) 		spu->spu_cipher_req_finish = spu2_cipher_req_finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363) 		spu->spu_request_pad = spu2_request_pad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) 		spu->spu_tx_status_len = spu2_tx_status_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365) 		spu->spu_rx_status_len = spu2_rx_status_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) 		spu->spu_status_process = spu2_status_process;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) 		spu->spu_xts_tweak_in_payload = spu2_xts_tweak_in_payload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) 		spu->spu_ccm_update_iv = spu2_ccm_update_iv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369) 		spu->spu_wordalign_padlen = spu2_wordalign_padlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374)  * spu_mb_init() - Initialize mailbox client. Request ownership of a mailbox
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375)  * channel for the SPU being probed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376)  * @dev:  SPU driver device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378)  * Return: 0 if successful
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379)  *	   < 0 otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381) static int spu_mb_init(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383) 	struct mbox_client *mcl = &iproc_priv.mcl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) 	int err, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) 	iproc_priv.mbox = devm_kcalloc(dev, iproc_priv.spu.num_chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387) 				  sizeof(struct mbox_chan *), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) 	if (!iproc_priv.mbox)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) 	mcl->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) 	mcl->tx_block = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393) 	mcl->tx_tout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) 	mcl->knows_txdone = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) 	mcl->rx_callback = spu_rx_callback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) 	mcl->tx_done = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) 	for (i = 0; i < iproc_priv.spu.num_chan; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399) 		iproc_priv.mbox[i] = mbox_request_channel(mcl, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400) 		if (IS_ERR(iproc_priv.mbox[i])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401) 			err = PTR_ERR(iproc_priv.mbox[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) 			dev_err(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403) 				"Mbox channel %d request failed with err %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) 				i, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405) 			iproc_priv.mbox[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406) 			goto free_channels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) free_channels:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) 	for (i = 0; i < iproc_priv.spu.num_chan; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) 		if (iproc_priv.mbox[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) 			mbox_free_channel(iproc_priv.mbox[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) static void spu_mb_release(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) 	for (i = 0; i < iproc_priv.spu.num_chan; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425) 		mbox_free_channel(iproc_priv.mbox[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428) static void spu_counters_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431) 	int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) 	atomic_set(&iproc_priv.session_count, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) 	atomic_set(&iproc_priv.stream_count, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) 	atomic_set(&iproc_priv.next_chan, (int)iproc_priv.spu.num_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) 	atomic64_set(&iproc_priv.bytes_in, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) 	atomic64_set(&iproc_priv.bytes_out, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438) 	for (i = 0; i < SPU_OP_NUM; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) 		atomic_set(&iproc_priv.op_counts[i], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) 		atomic_set(&iproc_priv.setkey_cnt[i], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) 	for (i = 0; i < CIPHER_ALG_LAST; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) 		for (j = 0; j < CIPHER_MODE_LAST; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) 			atomic_set(&iproc_priv.cipher_cnt[i][j], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) 	for (i = 0; i < HASH_ALG_LAST; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) 		atomic_set(&iproc_priv.hash_cnt[i], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) 		atomic_set(&iproc_priv.hmac_cnt[i], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450) 	for (i = 0; i < AEAD_TYPE_LAST; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) 		atomic_set(&iproc_priv.aead_cnt[i], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453) 	atomic_set(&iproc_priv.mb_no_spc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) 	atomic_set(&iproc_priv.mb_send_fail, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455) 	atomic_set(&iproc_priv.bad_icv, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458) static int spu_register_skcipher(struct iproc_alg_s *driver_alg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460) 	struct skcipher_alg *crypto = &driver_alg->alg.skcipher;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463) 	crypto->base.cra_module = THIS_MODULE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464) 	crypto->base.cra_priority = cipher_pri;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) 	crypto->base.cra_alignmask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466) 	crypto->base.cra_ctxsize = sizeof(struct iproc_ctx_s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) 	crypto->base.cra_flags = CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468) 				 CRYPTO_ALG_ALLOCATES_MEMORY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469) 				 CRYPTO_ALG_KERN_DRIVER_ONLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471) 	crypto->init = skcipher_init_tfm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472) 	crypto->exit = skcipher_exit_tfm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473) 	crypto->setkey = skcipher_setkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474) 	crypto->encrypt = skcipher_encrypt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) 	crypto->decrypt = skcipher_decrypt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477) 	err = crypto_register_skcipher(crypto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478) 	/* Mark alg as having been registered, if successful */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479) 	if (err == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480) 		driver_alg->registered = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481) 	pr_debug("  registered skcipher %s\n", crypto->base.cra_driver_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485) static int spu_register_ahash(struct iproc_alg_s *driver_alg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487) 	struct spu_hw *spu = &iproc_priv.spu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488) 	struct ahash_alg *hash = &driver_alg->alg.hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491) 	/* AES-XCBC is the only AES hash type currently supported on SPU-M */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492) 	if ((driver_alg->auth_info.alg == HASH_ALG_AES) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493) 	    (driver_alg->auth_info.mode != HASH_MODE_XCBC) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494) 	    (spu->spu_type == SPU_TYPE_SPUM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497) 	/* SHA3 algorithm variants are not registered for SPU-M or SPU2. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498) 	if ((driver_alg->auth_info.alg >= HASH_ALG_SHA3_224) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4499) 	    (spu->spu_subtype != SPU_SUBTYPE_SPU2_V2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4500) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4502) 	hash->halg.base.cra_module = THIS_MODULE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4503) 	hash->halg.base.cra_priority = hash_pri;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4504) 	hash->halg.base.cra_alignmask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4505) 	hash->halg.base.cra_ctxsize = sizeof(struct iproc_ctx_s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4506) 	hash->halg.base.cra_init = ahash_cra_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4507) 	hash->halg.base.cra_exit = generic_cra_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4508) 	hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4509) 				    CRYPTO_ALG_ALLOCATES_MEMORY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4510) 	hash->halg.statesize = sizeof(struct spu_hash_export_s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4512) 	if (driver_alg->auth_info.mode != HASH_MODE_HMAC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4513) 		hash->init = ahash_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4514) 		hash->update = ahash_update;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4515) 		hash->final = ahash_final;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4516) 		hash->finup = ahash_finup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4517) 		hash->digest = ahash_digest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4518) 		if ((driver_alg->auth_info.alg == HASH_ALG_AES) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4519) 		    ((driver_alg->auth_info.mode == HASH_MODE_XCBC) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4520) 		    (driver_alg->auth_info.mode == HASH_MODE_CMAC))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4521) 			hash->setkey = ahash_setkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4522) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4523) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4524) 		hash->setkey = ahash_hmac_setkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4525) 		hash->init = ahash_hmac_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4526) 		hash->update = ahash_hmac_update;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4527) 		hash->final = ahash_hmac_final;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4528) 		hash->finup = ahash_hmac_finup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4529) 		hash->digest = ahash_hmac_digest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4530) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4531) 	hash->export = ahash_export;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4532) 	hash->import = ahash_import;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4534) 	err = crypto_register_ahash(hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4535) 	/* Mark alg as having been registered, if successful */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4536) 	if (err == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4537) 		driver_alg->registered = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4538) 	pr_debug("  registered ahash %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4539) 		 hash->halg.base.cra_driver_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4540) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4543) static int spu_register_aead(struct iproc_alg_s *driver_alg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4544) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545) 	struct aead_alg *aead = &driver_alg->alg.aead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4546) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4548) 	aead->base.cra_module = THIS_MODULE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4549) 	aead->base.cra_priority = aead_pri;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4550) 	aead->base.cra_alignmask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4551) 	aead->base.cra_ctxsize = sizeof(struct iproc_ctx_s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4553) 	aead->base.cra_flags |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4554) 	/* setkey set in alg initialization */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4555) 	aead->setauthsize = aead_setauthsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4556) 	aead->encrypt = aead_encrypt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4557) 	aead->decrypt = aead_decrypt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4558) 	aead->init = aead_cra_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4559) 	aead->exit = aead_cra_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4561) 	err = crypto_register_aead(aead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4562) 	/* Mark alg as having been registered, if successful */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4563) 	if (err == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4564) 		driver_alg->registered = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4565) 	pr_debug("  registered aead %s\n", aead->base.cra_driver_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4566) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4569) /* register crypto algorithms the device supports */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4570) static int spu_algs_register(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4571) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4572) 	int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4573) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4575) 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4576) 		switch (driver_algs[i].type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4577) 		case CRYPTO_ALG_TYPE_SKCIPHER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4578) 			err = spu_register_skcipher(&driver_algs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4579) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4580) 		case CRYPTO_ALG_TYPE_AHASH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4581) 			err = spu_register_ahash(&driver_algs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4582) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4583) 		case CRYPTO_ALG_TYPE_AEAD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4584) 			err = spu_register_aead(&driver_algs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4585) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4586) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4587) 			dev_err(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4588) 				"iproc-crypto: unknown alg type: %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4589) 				driver_algs[i].type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4590) 			err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4591) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4593) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4594) 			dev_err(dev, "alg registration failed with error %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4595) 				err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4596) 			goto err_algs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4597) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4598) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4600) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4602) err_algs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4603) 	for (j = 0; j < i; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4604) 		/* Skip any algorithm not registered */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4605) 		if (!driver_algs[j].registered)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4606) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4607) 		switch (driver_algs[j].type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4608) 		case CRYPTO_ALG_TYPE_SKCIPHER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4609) 			crypto_unregister_skcipher(&driver_algs[j].alg.skcipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4610) 			driver_algs[j].registered = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4611) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4612) 		case CRYPTO_ALG_TYPE_AHASH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4613) 			crypto_unregister_ahash(&driver_algs[j].alg.hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4614) 			driver_algs[j].registered = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4615) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4616) 		case CRYPTO_ALG_TYPE_AEAD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4617) 			crypto_unregister_aead(&driver_algs[j].alg.aead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4618) 			driver_algs[j].registered = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4619) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4620) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4621) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4622) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4625) /* ==================== Kernel Platform API ==================== */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4627) static struct spu_type_subtype spum_ns2_types = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4628) 	SPU_TYPE_SPUM, SPU_SUBTYPE_SPUM_NS2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4629) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4631) static struct spu_type_subtype spum_nsp_types = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4632) 	SPU_TYPE_SPUM, SPU_SUBTYPE_SPUM_NSP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4633) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4635) static struct spu_type_subtype spu2_types = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4636) 	SPU_TYPE_SPU2, SPU_SUBTYPE_SPU2_V1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4637) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4639) static struct spu_type_subtype spu2_v2_types = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4640) 	SPU_TYPE_SPU2, SPU_SUBTYPE_SPU2_V2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4641) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4643) static const struct of_device_id bcm_spu_dt_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4644) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4645) 		.compatible = "brcm,spum-crypto",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4646) 		.data = &spum_ns2_types,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4647) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4648) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4649) 		.compatible = "brcm,spum-nsp-crypto",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4650) 		.data = &spum_nsp_types,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4651) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4652) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4653) 		.compatible = "brcm,spu2-crypto",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4654) 		.data = &spu2_types,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4655) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4656) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4657) 		.compatible = "brcm,spu2-v2-crypto",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4658) 		.data = &spu2_v2_types,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4659) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4660) 	{ /* sentinel */ }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4661) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4663) MODULE_DEVICE_TABLE(of, bcm_spu_dt_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4665) static int spu_dt_read(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4666) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4667) 	struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4668) 	struct spu_hw *spu = &iproc_priv.spu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4669) 	struct resource *spu_ctrl_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4670) 	const struct spu_type_subtype *matched_spu_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4671) 	struct device_node *dn = pdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4672) 	int err, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4674) 	/* Count number of mailbox channels */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4675) 	spu->num_chan = of_count_phandle_with_args(dn, "mboxes", "#mbox-cells");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4677) 	matched_spu_type = of_device_get_match_data(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4678) 	if (!matched_spu_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4679) 		dev_err(dev, "Failed to match device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4680) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4681) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4683) 	spu->spu_type = matched_spu_type->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4684) 	spu->spu_subtype = matched_spu_type->subtype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4686) 	for (i = 0; (i < MAX_SPUS) && ((spu_ctrl_regs =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4687) 		platform_get_resource(pdev, IORESOURCE_MEM, i)) != NULL); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4689) 		spu->reg_vbase[i] = devm_ioremap_resource(dev, spu_ctrl_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4690) 		if (IS_ERR(spu->reg_vbase[i])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4691) 			err = PTR_ERR(spu->reg_vbase[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4692) 			dev_err(dev, "Failed to map registers: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4693) 				err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4694) 			spu->reg_vbase[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4695) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4696) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4697) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4698) 	spu->num_spu = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4699) 	dev_dbg(dev, "Device has %d SPUs", spu->num_spu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4701) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4704) static int bcm_spu_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4705) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4706) 	struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4707) 	struct spu_hw *spu = &iproc_priv.spu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4708) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4710) 	iproc_priv.pdev  = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4711) 	platform_set_drvdata(iproc_priv.pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4712) 			     &iproc_priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4714) 	err = spu_dt_read(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4715) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4716) 		goto failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4718) 	err = spu_mb_init(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4719) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4720) 		goto failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4722) 	if (spu->spu_type == SPU_TYPE_SPUM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4723) 		iproc_priv.bcm_hdr_len = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4724) 	else if (spu->spu_type == SPU_TYPE_SPU2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4725) 		iproc_priv.bcm_hdr_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4727) 	spu_functions_register(dev, spu->spu_type, spu->spu_subtype);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4729) 	spu_counters_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4731) 	spu_setup_debugfs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4733) 	err = spu_algs_register(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4734) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4735) 		goto fail_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4737) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4739) fail_reg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4740) 	spu_free_debugfs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4741) failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4742) 	spu_mb_release(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4743) 	dev_err(dev, "%s failed with error %d.\n", __func__, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4745) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4746) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4748) static int bcm_spu_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4749) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4750) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4751) 	struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4752) 	char *cdn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4754) 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4755) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4756) 		 * Not all algorithms were registered, depending on whether
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4757) 		 * hardware is SPU or SPU2.  So here we make sure to skip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4758) 		 * those algorithms that were not previously registered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4759) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4760) 		if (!driver_algs[i].registered)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4761) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4763) 		switch (driver_algs[i].type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4764) 		case CRYPTO_ALG_TYPE_SKCIPHER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4765) 			crypto_unregister_skcipher(&driver_algs[i].alg.skcipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4766) 			dev_dbg(dev, "  unregistered cipher %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4767) 				driver_algs[i].alg.skcipher.base.cra_driver_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4768) 			driver_algs[i].registered = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4769) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4770) 		case CRYPTO_ALG_TYPE_AHASH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4771) 			crypto_unregister_ahash(&driver_algs[i].alg.hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4772) 			cdn = driver_algs[i].alg.hash.halg.base.cra_driver_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4773) 			dev_dbg(dev, "  unregistered hash %s\n", cdn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4774) 			driver_algs[i].registered = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4775) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4776) 		case CRYPTO_ALG_TYPE_AEAD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4777) 			crypto_unregister_aead(&driver_algs[i].alg.aead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4778) 			dev_dbg(dev, "  unregistered aead %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4779) 				driver_algs[i].alg.aead.base.cra_driver_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4780) 			driver_algs[i].registered = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4781) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4782) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4783) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4784) 	spu_free_debugfs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4785) 	spu_mb_release(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4786) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4789) /* ===== Kernel Module API ===== */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4791) static struct platform_driver bcm_spu_pdriver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4792) 	.driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4793) 		   .name = "brcm-spu-crypto",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4794) 		   .of_match_table = of_match_ptr(bcm_spu_dt_ids),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4795) 		   },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4796) 	.probe = bcm_spu_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4797) 	.remove = bcm_spu_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4798) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4799) module_platform_driver(bcm_spu_pdriver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4801) MODULE_AUTHOR("Rob Rice <rob.rice@broadcom.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4802) MODULE_DESCRIPTION("Broadcom symmetric crypto offload driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4803) MODULE_LICENSE("GPL v2");