/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 *	Manoj Malviya (manojmalviya@chelsio.com)
 *	Atul Gupta (atul.gupta@chelsio.com)
 *	Jitendra Lulla (jlulla@chelsio.com)
 *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 *	Harsh Jain (harsh@chelsio.com)
 */

#define pr_fmt(fmt) "chcr:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/authenc.h>
#include <crypto/ctr.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "t4fw_api.h"
#include "t4_msg.h"
#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"

#define IV AES_BLOCK_SIZE
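
/*
 * Precomputed sizes (in bytes) of a ULPTX source SGL (sgl_ent_len) and
 * of a destination DSGL (dsgl_ent_len), indexed by the number of
 * entries.  The WR-sizing helpers below compare these against the
 * remaining work-request space to decide how many scatterlist entries
 * still fit.
 */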
static unsigned int sgl_ent_len[] = {
	0, 0, 16, 24, 40, 48, 64, 72, 88,
	96, 112, 120, 136, 144, 160, 168, 184,
	192, 208, 216, 232, 240, 256, 264, 280,
	288, 304, 312, 328, 336, 352, 360, 376
};

static unsigned int dsgl_ent_len[] = {
	0, 32, 32, 48, 48, 64, 64, 80, 80,
	112, 112, 128, 128, 144, 144, 160, 160,
	192, 192, 208, 208, 224, 224, 240, 240,
	272, 272, 288, 288, 304, 304, 320, 320
};

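/*
 * AES key-schedule round constants (FIPS-197 Rcon), stored in the
 * most-significant byte of each word.
 */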
static u32 round_constant[11] = {
	0x01000000, 0x02000000, 0x04000000, 0x08000000,
	0x10000000, 0x20000000, 0x40000000, 0x80000000,
	0x1B000000, 0x36000000, 0x6C000000
};

static int chcr_handle_cipher_resp(struct skcipher_request *req,
				   unsigned char *input, int err);

static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->aeadctx;
}

static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->ablkctx;
}

static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->hmacctx;
}

static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
{
	return gctx->ctx->gcm;
}

static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
{
	return gctx->ctx->authenc;
}

static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
{
	return container_of(ctx->dev, struct uld_ctx, dev);
}

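/* True if the skb is small enough to be sent as immediate WR data. */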
static inline int is_ofld_imm(const struct sk_buff *skb)
{
	return (skb->len <= SGE_MAX_WR_LEN);
}

static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
{
	memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
}

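/*
 * Count the hardware SG entries needed to cover @reqlen bytes of @sg,
 * starting @skip bytes in, with each entry limited to @entlen bytes.
 */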
static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
			 unsigned int entlen,
			 unsigned int skip)
{
	int nents = 0;
	unsigned int less;
	unsigned int skip_len = 0;

	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}

	while (sg && reqlen) {
		less = min(reqlen, sg_dma_len(sg) - skip_len);
		nents += DIV_ROUND_UP(less, entlen);
		reqlen -= less;
		skip_len = 0;
		sg = sg_next(sg);
	}
	return nents;
}

static inline int get_aead_subtype(struct crypto_aead *aead)
{
	struct aead_alg *alg = crypto_aead_alg(aead);
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.aead);
	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

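/*
 * Check the authentication tag of an AEAD completion.  The tag
 * computed by the hardware arrives in the CPL_FW6_PLD message; for GCM
 * it is compared against the tag carried in the same message, for the
 * other modes against the tag copied from the tail of the source
 * scatterlist.  *err is set to -EBADMSG on mismatch, 0 otherwise.
 */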
void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
{
	u8 temp[SHA512_DIGEST_SIZE];
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int authsize = crypto_aead_authsize(tfm);
	struct cpl_fw6_pld *fw6_pld;
	int cmp = 0;

	fw6_pld = (struct cpl_fw6_pld *)input;
	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
		cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
	} else {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
				   authsize, req->assoclen +
				   req->cryptlen - authsize);
		cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
	}
	if (cmp)
		*err = -EBADMSG;
	else
		*err = 0;
}

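/*
 * Account one more outstanding WR against the device.  Fails (returns
 * 1) once the device has started detaching, so no new work is issued.
 */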
static int chcr_inc_wrcount(struct chcr_dev *dev)
{
	if (dev->state == CHCR_DETACH)
		return 1;
	atomic_inc(&dev->inflight);
	return 0;
}

static inline void chcr_dec_wrcount(struct chcr_dev *dev)
{
	atomic_dec(&dev->inflight);
}

static inline int chcr_handle_aead_resp(struct aead_request *req,
					unsigned char *input,
					int err)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_dev *dev = a_ctx(tfm)->dev;

	chcr_aead_common_exit(req);
	if (reqctx->verify == VERIFY_SW) {
		chcr_verify_tag(req, input, &err);
		reqctx->verify = VERIFY_HW;
	}
	chcr_dec_wrcount(dev);
	req->base.complete(&req->base, err);

	return err;
}

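/*
 * Run the FIPS-197 key expansion
 *
 *	w[i] = w[i - nk] ^ f(w[i - 1])
 *
 * over @key, keeping only a ring of the last nk words, and emit those
 * final round-key words to @dec_key.  They are the starting key state
 * the hardware needs to perform AES decryption.
 */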
static void get_aes_decrypt_key(unsigned char *dec_key,
				const unsigned char *key,
				unsigned int keylength)
{
	u32 temp;
	u32 w_ring[MAX_NK];
	int i, j, k;
	u8 nr, nk;

	switch (keylength) {
	case AES_KEYLENGTH_128BIT:
		nk = KEYLENGTH_4BYTES;
		nr = NUMBER_OF_ROUNDS_10;
		break;
	case AES_KEYLENGTH_192BIT:
		nk = KEYLENGTH_6BYTES;
		nr = NUMBER_OF_ROUNDS_12;
		break;
	case AES_KEYLENGTH_256BIT:
		nk = KEYLENGTH_8BYTES;
		nr = NUMBER_OF_ROUNDS_14;
		break;
	default:
		return;
	}
	for (i = 0; i < nk; i++)
		w_ring[i] = get_unaligned_be32(&key[i * 4]);

	i = 0;
	temp = w_ring[nk - 1];
	while (i + nk < (nr + 1) * 4) {
		if (!(i % nk)) {
			/* RotWord(temp) */
			temp = (temp << 8) | (temp >> 24);
			temp = aes_ks_subword(temp);
			temp ^= round_constant[i / nk];
		} else if (nk == 8 && (i % 4 == 0)) {
			temp = aes_ks_subword(temp);
		}
		w_ring[i % nk] ^= temp;
		temp = w_ring[i % nk];
		i++;
	}
	i--;
	for (k = 0, j = i % nk; k < nk; k++) {
		put_unaligned_be32(w_ring[j], &dec_key[k * 4]);
		j--;
		if (j < 0)
			j += nk;
	}
}

static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
{
	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);

	switch (ds) {
	case SHA1_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha1", 0, 0);
		break;
	case SHA224_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha224", 0, 0);
		break;
	case SHA256_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha256", 0, 0);
		break;
	case SHA384_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha384", 0, 0);
		break;
	case SHA512_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha512", 0, 0);
		break;
	}

	return base_hash;
}

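/*
 * Hash exactly one block (the HMAC ipad or opad) and export the raw
 * internal digest state.  This partial state is later loaded into the
 * hardware so it can carry on the HMAC computation.
 */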
static int chcr_compute_partial_hash(struct shash_desc *desc,
				     char *iopad, char *result_hash,
				     int digest_size)
{
	struct sha1_state sha1_st;
	struct sha256_state sha256_st;
	struct sha512_state sha512_st;
	int error;

	if (digest_size == SHA1_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha1_st);
		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
	} else if (digest_size == SHA224_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
	} else if (digest_size == SHA256_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
	} else if (digest_size == SHA384_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
	} else if (digest_size == SHA512_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
	} else {
		error = -EINVAL;
		pr_err("Unknown digest size %d\n", digest_size);
	}
	return error;
}

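/*
 * Swap the exported digest state words into the big-endian order the
 * hardware expects.
 */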
static void chcr_change_order(char *buf, int ds)
{
	int i;

	if (ds == SHA512_DIGEST_SIZE) {
		for (i = 0; i < (ds / sizeof(u64)); i++)
			*((__be64 *)buf + i) =
				cpu_to_be64(*((u64 *)buf + i));
	} else {
		for (i = 0; i < (ds / sizeof(u32)); i++)
			*((__be32 *)buf + i) =
				cpu_to_be32(*((u32 *)buf + i));
	}
}

static inline int is_hmac(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
			     alg.hash);
	if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
		return 1;
	return 0;
}

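/*
 * dsgl_walk_*: incrementally build the CPL_RX_PHYS_DSGL destination
 * gather list, packing eight address/length pairs into each
 * phys_sge_pairs block.
 */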
static inline void dsgl_walk_init(struct dsgl_walk *walk,
				  struct cpl_rx_phys_dsgl *dsgl)
{
	walk->dsgl = dsgl;
	walk->nents = 0;
	walk->to = (struct phys_sge_pairs *)(dsgl + 1);
}

static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
				 int pci_chan_id)
{
	struct cpl_rx_phys_dsgl *phys_cpl;

	phys_cpl = walk->dsgl;

	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
	phys_cpl->pcirlxorder_to_noofsgentr =
		htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
		      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
	phys_cpl->rss_hdr_int.qid = htons(qid);
	phys_cpl->rss_hdr_int.hash_val = 0;
	phys_cpl->rss_hdr_int.channel = pci_chan_id;
}

static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
				      size_t size,
				      dma_addr_t addr)
{
	int j;

	if (!size)
		return;
	j = walk->nents;
	walk->to->len[j % 8] = htons(size);
	walk->to->addr[j % 8] = cpu_to_be64(addr);
	j++;
	if ((j % 8) == 0)
		walk->to++;
	walk->nents = j;
}

static void dsgl_walk_add_sg(struct dsgl_walk *walk,
			     struct scatterlist *sg,
			     unsigned int slen,
			     unsigned int skip)
{
	int skip_len = 0;
	unsigned int left_size = slen, len = 0;
	unsigned int j = walk->nents;
	int offset, ent_len;

	if (!slen)
		return;
	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}

	while (left_size && sg) {
		len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
		offset = 0;
		while (len) {
			ent_len = min_t(u32, len, CHCR_DST_SG_SIZE);
			walk->to->len[j % 8] = htons(ent_len);
			walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
							    offset + skip_len);
			offset += ent_len;
			len -= ent_len;
			j++;
			if ((j % 8) == 0)
				walk->to++;
		}
		walk->last_sg = sg;
		walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
					  skip_len) + skip_len;
		left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
		skip_len = 0;
		sg = sg_next(sg);
	}
	walk->nents = j;
}

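/*
 * ulptx_walk_*: incrementally build the ULPTX source gather list.  The
 * first entry lives in len0/addr0 of the ulptx_sgl header; subsequent
 * entries are packed two per sge pair.
 */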
static inline void ulptx_walk_init(struct ulptx_walk *walk,
				   struct ulptx_sgl *ulp)
{
	walk->sgl = ulp;
	walk->nents = 0;
	walk->pair_idx = 0;
	walk->pair = ulp->sge;
	walk->last_sg = NULL;
	walk->last_sg_len = 0;
}

static inline void ulptx_walk_end(struct ulptx_walk *walk)
{
	walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
				    ULPTX_NSGE_V(walk->nents));
}

static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
				       size_t size,
				       dma_addr_t addr)
{
	if (!size)
		return;

	if (walk->nents == 0) {
		walk->sgl->len0 = cpu_to_be32(size);
		walk->sgl->addr0 = cpu_to_be64(addr);
	} else {
		walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr);
		walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
		walk->pair_idx = !walk->pair_idx;
		if (!walk->pair_idx)
			walk->pair++;
	}
	walk->nents++;
}

static void ulptx_walk_add_sg(struct ulptx_walk *walk,
			      struct scatterlist *sg,
			      unsigned int len,
			      unsigned int skip)
{
	int small;
	int skip_len = 0;
	unsigned int sgmin;

	if (!len)
		return;
	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}
	WARN(!sg, "SG should not be null here\n");
	if (sg && (walk->nents == 0)) {
		small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
		walk->sgl->len0 = cpu_to_be32(sgmin);
		walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
		walk->nents++;
		len -= sgmin;
		walk->last_sg = sg;
		walk->last_sg_len = sgmin + skip_len;
		skip_len += sgmin;
		if (sg_dma_len(sg) == skip_len) {
			sg = sg_next(sg);
			skip_len = 0;
		}
	}

	while (sg && len) {
		small = min(sg_dma_len(sg) - skip_len, len);
		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
		walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
		walk->pair->addr[walk->pair_idx] =
			cpu_to_be64(sg_dma_address(sg) + skip_len);
		walk->pair_idx = !walk->pair_idx;
		walk->nents++;
		if (!walk->pair_idx)
			walk->pair++;
		len -= sgmin;
		skip_len += sgmin;
		walk->last_sg = sg;
		walk->last_sg_len = skip_len;
		if (sg_dma_len(sg) == skip_len) {
			sg = sg_next(sg);
			skip_len = 0;
		}
	}
}

static inline int get_cryptoalg_subtype(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.skcipher);

	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
{
	struct adapter *adap = netdev2adap(dev);
	struct sge_uld_txq_info *txq_info =
		adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
	struct sge_uld_txq *txq;
	int ret = 0;

	local_bh_disable();
	txq = &txq_info->uldtxq[idx];
	spin_lock(&txq->sendq.lock);
	if (txq->full)
		ret = -1;
	spin_unlock(&txq->sendq.lock);
	local_bh_enable();
	return ret;
}

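/*
 * Lay out the key material for a decryption key context: CBC uses the
 * reversed round key on its own, while the other modes (e.g. XTS) keep
 * the second half of the raw key ahead of half of the reversed round
 * key.
 */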
static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
			       struct _key_ctx *key_ctx)
{
	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
		memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
	} else {
		memcpy(key_ctx->key,
		       ablkctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->enckey_len >> 1);
		memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->rrkey, ablkctx->enckey_len >> 1);
	}
	return 0;
}

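/*
 * Return how many source bytes, consumed in CHCR_SRC_SG_SIZE chunks,
 * still fit within @space bytes of work-request room, starting @minsg
 * SG entries in.
 */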
static int chcr_hash_ent_in_wr(struct scatterlist *src,
			       unsigned int minsg,
			       unsigned int space,
			       unsigned int srcskip)
{
	int srclen = 0;
	int srcsg = minsg;
	int soffset = 0, sless;

	if (sg_dma_len(src) == srcskip) {
		src = sg_next(src);
		srcskip = 0;
	}
	while (src && space > (sgl_ent_len[srcsg + 1])) {
		sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
			      CHCR_SRC_SG_SIZE);
		srclen += sless;
		soffset += sless;
		srcsg++;
		if (sg_dma_len(src) == (soffset + srcskip)) {
			src = sg_next(src);
			soffset = 0;
			srcskip = 0;
		}
	}
	return srclen;
}

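/*
 * Like chcr_hash_ent_in_wr(), but walk source and destination
 * together and return the number of payload bytes for which both the
 * source SGL and the destination DSGL still fit within @space.
 */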
static int chcr_sg_ent_in_wr(struct scatterlist *src,
			     struct scatterlist *dst,
			     unsigned int minsg,
			     unsigned int space,
			     unsigned int srcskip,
			     unsigned int dstskip)
{
	int srclen = 0, dstlen = 0;
	int srcsg = minsg, dstsg = minsg;
	int offset = 0, soffset = 0, less, sless = 0;

	if (sg_dma_len(src) == srcskip) {
		src = sg_next(src);
		srcskip = 0;
	}
	if (sg_dma_len(dst) == dstskip) {
		dst = sg_next(dst);
		dstskip = 0;
	}

	while (src && dst &&
	       space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
		sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
			      CHCR_SRC_SG_SIZE);
		srclen += sless;
		srcsg++;
		offset = 0;
		while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
		       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
			if (srclen <= dstlen)
				break;
			less = min_t(unsigned int, sg_dma_len(dst) - offset -
				     dstskip, CHCR_DST_SG_SIZE);
			dstlen += less;
			offset += less;
			if ((offset + dstskip) == sg_dma_len(dst)) {
				dst = sg_next(dst);
				offset = 0;
			}
			dstsg++;
			dstskip = 0;
		}
		soffset += sless;
		if ((soffset + srcskip) == sg_dma_len(src)) {
			src = sg_next(src);
			srcskip = 0;
			soffset = 0;
		}
	}
	return min(srclen, dstlen);
}

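/*
 * Hand the request over to the software fallback tfm, preserving the
 * caller's completion callback and request flags.
 */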
static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
				struct skcipher_request *req,
				u8 *iv,
				unsigned short op_type)
{
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	int err;

	skcipher_request_set_tfm(&reqctx->fallback_req, cipher);
	skcipher_request_set_callback(&reqctx->fallback_req, req->base.flags,
				      req->base.complete, req->base.data);
	skcipher_request_set_crypt(&reqctx->fallback_req, req->src, req->dst,
				   req->cryptlen, iv);

	err = op_type ? crypto_skcipher_decrypt(&reqctx->fallback_req) :
			crypto_skcipher_encrypt(&reqctx->fallback_req);

	return err;
}

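/*
 * Fetch the tx/rx queue indices stashed in the request context; the
 * context layout depends on the crypto algorithm type.
 */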
static inline int get_qidxs(struct crypto_async_request *req,
			    unsigned int *txqidx, unsigned int *rxqidx)
{
	struct crypto_tfm *tfm = req->tfm;
	int ret = 0;

	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AEAD:
	{
		struct aead_request *aead_req =
			container_of(req, struct aead_request, base);
		struct chcr_aead_reqctx *reqctx = aead_request_ctx(aead_req);
		*txqidx = reqctx->txqidx;
		*rxqidx = reqctx->rxqidx;
		break;
	}
	case CRYPTO_ALG_TYPE_SKCIPHER:
	{
		struct skcipher_request *sk_req =
			container_of(req, struct skcipher_request, base);
		struct chcr_skcipher_req_ctx *reqctx =
			skcipher_request_ctx(sk_req);
		*txqidx = reqctx->txqidx;
		*rxqidx = reqctx->rxqidx;
		break;
	}
	case CRYPTO_ALG_TYPE_AHASH:
	{
		struct ahash_request *ahash_req =
			container_of(req, struct ahash_request, base);
		struct chcr_ahash_req_ctx *reqctx =
			ahash_request_ctx(ahash_req);
		*txqidx = reqctx->txqidx;
		*rxqidx = reqctx->rxqidx;
		break;
	}
	default:
		ret = -EINVAL;
		/* should never get here */
		BUG();
		break;
	}
	return ret;
}

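/*
 * Fill the FW_CRYPTO_LOOKASIDE_WR header and the ULPTX command fields
 * common to every crypto WR: lengths, the completion cookie, and the
 * channel/queue routing derived from the per-request queue indices.
 */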
static inline void create_wreq(struct chcr_context *ctx,
			       struct chcr_wr *chcr_req,
			       struct crypto_async_request *req,
			       unsigned int imm,
			       int hash_sz,
			       unsigned int len16,
			       unsigned int sc_len,
			       unsigned int lcb)
{
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	unsigned int tx_channel_id, rx_channel_id;
	unsigned int txqidx = 0, rxqidx = 0;
	unsigned int qid, fid, portno;

	get_qidxs(req, &txqidx, &rxqidx);
	qid = u_ctx->lldi.rxq_ids[rxqidx];
	fid = u_ctx->lldi.rxq_ids[0];
	portno = rxqidx / ctx->rxq_perchan;
	tx_channel_id = txqidx / ctx->txq_perchan;
	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[portno]);

	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
	chcr_req->wreq.pld_size_hash_size =
		htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
	chcr_req->wreq.len16_pkd =
		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
	chcr_req->wreq.rx_chid_to_rx_q_id = FILL_WR_RX_Q_ID(rx_channel_id, qid,
							    !!lcb, txqidx);

	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(tx_channel_id, fid);
	chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
				     ((sizeof(chcr_req->wreq)) >> 4)));
	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
					   sizeof(chcr_req->key_ctx) + sc_len);
}

/**
 * create_cipher_wr - form the WR for cipher operations
 * @wrparam: cipher work request parameters: the skcipher request, the
 *	ingress qid where the response of this WR should be received,
 *	and the number of bytes to process.
 */
static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
	struct chcr_context *ctx = c_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	struct chcr_skcipher_req_ctx *reqctx =
		skcipher_request_ctx(wrparam->req);
	unsigned int temp = 0, transhdr_len, dst_size;
	int error;
	int nents;
	unsigned int kctx_len;
	gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
			GFP_KERNEL : GFP_ATOMIC;
	struct adapter *adap = padap(ctx->dev);
	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;

	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
	nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
			      reqctx->dst_ofst);
	dst_size = get_space_for_phys_dsgl(nents);
	kctx_len = roundup(ablkctx->enckey_len, 16);
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
			      CHCR_SRC_SG_SIZE, reqctx->src_ofst);
	temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
				(sgl_len(nents) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);
	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}
	chcr_req = __skb_put_zero(skb, transhdr_len);
	chcr_req->sec_cpl.op_ivinsrtofst =
			FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);

	chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
	chcr_req->sec_cpl.aadstart_cipherstop_hi =
			FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);

	chcr_req->sec_cpl.cipherstop_lo_authinsert =
			FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
							  ablkctx->ciph_mode,
							  0, 0, IV >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
								   0, 1, dst_size);

	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
	if ((reqctx->op == CHCR_DECRYPT_OP) &&
	    (!(get_cryptoalg_subtype(tfm) ==
	       CRYPTO_ALG_SUB_TYPE_CTR)) &&
	    (!(get_cryptoalg_subtype(tfm) ==
	       CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
	} else {
		if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
		    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
			memcpy(chcr_req->key_ctx.key, ablkctx->key,
			       ablkctx->enckey_len);
		} else {
			memcpy(chcr_req->key_ctx.key, ablkctx->key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->enckey_len >> 1);
			memcpy(chcr_req->key_ctx.key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->key,
			       ablkctx->enckey_len >> 1);
		}
	}
	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
	chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
	chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);

	atomic_inc(&adap->chcr_stats.cipher_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
		+ (reqctx->imm ? (wrparam->bytes) : 0);
	create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
		    transhdr_len, temp,
		    ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
	reqctx->skb = skb;

	/* Save the last ciphertext block as the chaining IV for the
	 * next CBC decrypt chunk.
	 */
	if (reqctx->op && (ablkctx->ciph_mode ==
			   CHCR_SCMD_CIPHER_MODE_AES_CBC))
		sg_pcopy_to_buffer(wrparam->req->src,
			sg_nents(wrparam->req->src), wrparam->req->iv, 16,
			reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);

	return skb;
err:
	return ERR_PTR(error);
}

static inline int chcr_keyctx_ck_size(unsigned int keylen)
{
	int ck_size = 0;

	if (keylen == AES_KEYSIZE_128)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	else if (keylen == AES_KEYSIZE_192)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	else if (keylen == AES_KEYSIZE_256)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;

	return ck_size;
}

static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher,
				       const u8 *key,
				       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));

	crypto_skcipher_clear_flags(ablkctx->sw_cipher,
				    CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(ablkctx->sw_cipher,
				  cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
}

static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
			       const u8 *key,
			       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;

	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
	return 0;
badkey_err:
	ablkctx->enckey_len = 0;

	return err;
}

static int chcr_aes_ctr_setkey(struct crypto_skcipher *cipher,
			       const u8 *key,
			       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;
	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

	return 0;
badkey_err:
	ablkctx->enckey_len = 0;

	return err;
}

static int chcr_aes_rfc3686_setkey(struct crypto_skcipher *cipher,
				   const u8 *key,
				   unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	if (keylen < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;
	memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
	       CTR_RFC3686_NONCE_SIZE);

	keylen -= CTR_RFC3686_NONCE_SIZE;
	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;

	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

	return 0;
badkey_err:
	ablkctx->enckey_len = 0;

	return err;
}

static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
{
	unsigned int size = AES_BLOCK_SIZE;
	__be32 *b = (__be32 *)(dstiv + size);
	u32 c, prev;

	memcpy(dstiv, srciv, AES_BLOCK_SIZE);
	/* Add 'add' to the least significant word, then ripple the
	 * carry through the remaining big-endian counter words.
	 */
	for (; size >= 4; size -= 4) {
		prev = be32_to_cpu(*--b);
		c = prev + add;
		*b = cpu_to_be32(c);
		if (prev < c)
			break;
		add = 1;
	}
}
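
/*
 * Illustrative note (not from the original source): treating the 16-byte
 * counter block as four big-endian 32-bit words, adding 2 to an IV whose
 * low word is 0xffffffff wraps that word and carries into the next one:
 *
 *	srciv = 00000000 00000000 00000000 ffffffff, add = 2
 *	dstiv = 00000000 00000000 00000001 00000001
 *
 * The loop in ctr_add_iv() stops at the first word addition that does
 * not wrap, since no further carry can occur.
 */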

static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
{
	__be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
	u64 c;
	u32 temp = be32_to_cpu(*--b);

	temp = ~temp;
	/* Number of blocks that can be processed without overflowing
	 * the low 32-bit counter word
	 */
	c = (u64)temp + 1;
	if ((bytes / AES_BLOCK_SIZE) >= c)
		bytes = c * AES_BLOCK_SIZE;
	return bytes;
}
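
/*
 * Illustrative note (not from the original source): if the low counter
 * word of the IV is 0xfffffffe, then ~temp = 1 and c = 2, i.e. only two
 * more AES blocks fit before the 32-bit word wraps.  A 10-block
 * (160-byte) request is therefore clamped here to 2 * AES_BLOCK_SIZE =
 * 32 bytes; the remainder is sent in follow-up WRs once the IV has been
 * updated.
 */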

static int chcr_update_tweak(struct skcipher_request *req, u8 *iv,
			     u32 isfinal)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct crypto_aes_ctx aes;
	int ret, i;
	u8 *key;
	unsigned int keylen;
	int round = reqctx->last_req_len / AES_BLOCK_SIZE;
	int round8 = round / 8;

	memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);

	keylen = ablkctx->enckey_len / 2;
	key = ablkctx->key + keylen;
	/* For a 192 bit key remove the padded zeroes which were
	 * added in chcr_xts_setkey
	 */
	if (KEY_CONTEXT_CK_SIZE_G(ntohl(ablkctx->key_ctx_hdr))
			== CHCR_KEYCTX_CIPHER_KEY_SIZE_192)
		ret = aes_expandkey(&aes, key, keylen - 8);
	else
		ret = aes_expandkey(&aes, key, keylen);
	if (ret)
		return ret;
	aes_encrypt(&aes, iv, iv);
	for (i = 0; i < round8; i++)
		gf128mul_x8_ble((le128 *)iv, (le128 *)iv);

	for (i = 0; i < (round % 8); i++)
		gf128mul_x_ble((le128 *)iv, (le128 *)iv);

	if (!isfinal)
		aes_decrypt(&aes, iv, iv);

	memzero_explicit(&aes, sizeof(aes));
	return 0;
}
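
/*
 * Illustrative note (not from the original source): the XTS tweak for
 * block n is E_K2(iv) * x^n in GF(2^128), so chcr_update_tweak()
 * re-encrypts the IV with the second half of the key and advances it by
 * 'round' doublings, taking eight at a time via gf128mul_x8_ble().
 * Roughly, for round = 3:
 *
 *	t = E_K2(iv);
 *	t = t * x;  t = t * x;  t = t * x;
 *
 * For intermediate (non-final) updates the result is decrypted again so
 * that the next WR carries an IV from which the hardware can re-derive
 * the same tweak.
 */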

static int chcr_update_cipher_iv(struct skcipher_request *req,
				 struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	int subtype = get_cryptoalg_subtype(tfm);
	int ret = 0;

	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		ctr_add_iv(iv, req->iv, (reqctx->processed /
					 AES_BLOCK_SIZE));
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
			CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
							    AES_BLOCK_SIZE) + 1);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
		ret = chcr_update_tweak(req, iv, 0);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
		if (reqctx->op)
			/* Updated before sending the last WR */
			memcpy(iv, req->iv, AES_BLOCK_SIZE);
		else
			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
	}

	return ret;
}

/* We need a separate function for the final IV because in RFC3686 the
 * initial counter starts from 1 and the IV buffer is only 8 bytes,
 * which remains constant for subsequent update requests.
 */

static int chcr_final_cipher_iv(struct skcipher_request *req,
				struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	int subtype = get_cryptoalg_subtype(tfm);
	int ret = 0;

	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		ctr_add_iv(iv, req->iv, DIV_ROUND_UP(reqctx->processed,
						     AES_BLOCK_SIZE));
	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) {
		if (!reqctx->partial_req)
			memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
		else
			ret = chcr_update_tweak(req, iv, 1);
	} else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
		/* Already updated for decrypt */
		if (!reqctx->op)
			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
	}
	return ret;
}

static int chcr_handle_cipher_resp(struct skcipher_request *req,
				   unsigned char *input, int err)
{
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
	struct chcr_dev *dev = c_ctx(tfm)->dev;
	struct chcr_context *ctx = c_ctx(tfm);
	struct adapter *adap = padap(ctx->dev);
	struct cipher_wr_param wrparam;
	struct sk_buff *skb;
	int bytes;

	if (err)
		goto unmap;
	if (req->cryptlen == reqctx->processed) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		err = chcr_final_cipher_iv(req, fw6_pld, req->iv);
		goto complete;
	}

	if (!reqctx->imm) {
		bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
					  CIP_SPACE_LEFT(ablkctx->enckey_len),
					  reqctx->src_ofst, reqctx->dst_ofst);
		if ((bytes + reqctx->processed) >= req->cryptlen)
			bytes = req->cryptlen - reqctx->processed;
		else
			bytes = rounddown(bytes, 16);
	} else {
		/* CTR mode counter overflow */
		bytes = req->cryptlen - reqctx->processed;
	}
	err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
	if (err)
		goto unmap;

	if (unlikely(bytes == 0)) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		memcpy(req->iv, reqctx->init_iv, IV);
		atomic_inc(&adap->chcr_stats.fallback);
		err = chcr_cipher_fallback(ablkctx->sw_cipher, req, req->iv,
					   reqctx->op);
		goto complete;
	}

	if (get_cryptoalg_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_CTR)
		bytes = adjust_ctr_overflow(reqctx->iv, bytes);
	wrparam.qid = u_ctx->lldi.rxq_ids[reqctx->rxqidx];
	wrparam.req = req;
	wrparam.bytes = bytes;
	skb = create_cipher_wr(&wrparam);
	if (IS_ERR(skb)) {
		pr_err("%s : Failed to form WR. No memory\n", __func__);
		err = PTR_ERR(skb);
		goto unmap;
	}
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
	chcr_send_wr(skb);
	reqctx->last_req_len = bytes;
	reqctx->processed += bytes;
	if (get_cryptoalg_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
	    CRYPTO_TFM_REQ_MAY_SLEEP) {
		complete(&ctx->cbc_aes_aio_done);
	}
	return 0;
unmap:
	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
complete:
	if (get_cryptoalg_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
	    CRYPTO_TFM_REQ_MAY_SLEEP) {
		complete(&ctx->cbc_aes_aio_done);
	}
	chcr_dec_wrcount(dev);
	req->base.complete(&req->base, err);
	return err;
}
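
/*
 * Illustrative note (not from the original source): requests larger than
 * one WR can carry are processed as a chain.  process_cipher() sends the
 * first chunk; each completion lands in chcr_handle_cipher_resp(), which
 * advances the IV past the bytes already processed and issues the next
 * chunk, until reqctx->processed == req->cryptlen and the final IV is
 * written back via chcr_final_cipher_iv().
 */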

static int process_cipher(struct skcipher_request *req,
			  unsigned short qid,
			  struct sk_buff **skb,
			  unsigned short op_type)
{
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct adapter *adap = padap(c_ctx(tfm)->dev);
	struct cipher_wr_param wrparam;
	int bytes, err = -EINVAL;
	int subtype;

	reqctx->processed = 0;
	reqctx->partial_req = 0;
	if (!req->iv)
		goto error;
	subtype = get_cryptoalg_subtype(tfm);
	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
	    (req->cryptlen == 0) ||
	    (req->cryptlen % crypto_skcipher_blocksize(tfm))) {
		if (req->cryptlen == 0 && subtype != CRYPTO_ALG_SUB_TYPE_XTS)
			goto fallback;
		else if (req->cryptlen % crypto_skcipher_blocksize(tfm) &&
			 subtype == CRYPTO_ALG_SUB_TYPE_XTS)
			goto fallback;
		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
		       ablkctx->enckey_len, req->cryptlen, ivsize);
		goto error;
	}

	err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
	if (err)
		goto error;
	if (req->cryptlen < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
					       AES_MIN_KEY_SIZE +
					       sizeof(struct cpl_rx_phys_dsgl) +
					       /* Min dsgl size */
					       32))) {
		/* Can be sent as Imm */
		unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;

		dnents = sg_nents_xlen(req->dst, req->cryptlen,
				       CHCR_DST_SG_SIZE, 0);
		phys_dsgl = get_space_for_phys_dsgl(dnents);
		kctx_len = roundup(ablkctx->enckey_len, 16);
		transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
		reqctx->imm = (transhdr_len + IV + req->cryptlen) <=
			SGE_MAX_WR_LEN;
		bytes = IV + req->cryptlen;

	} else {
		reqctx->imm = 0;
	}

	if (!reqctx->imm) {
		bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
					  CIP_SPACE_LEFT(ablkctx->enckey_len),
					  0, 0);
		if ((bytes + reqctx->processed) >= req->cryptlen)
			bytes = req->cryptlen - reqctx->processed;
		else
			bytes = rounddown(bytes, 16);
	} else {
		bytes = req->cryptlen;
	}
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		bytes = adjust_ctr_overflow(req->iv, bytes);

	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
		memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
		       CTR_RFC3686_IV_SIZE);

		/* initialize counter portion of counter block */
		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
		memcpy(reqctx->init_iv, reqctx->iv, IV);
	} else {
		memcpy(reqctx->iv, req->iv, IV);
		memcpy(reqctx->init_iv, req->iv, IV);
	}
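
	/*
	 * Illustrative note (not from the original source): for
	 * rfc3686(ctr(aes)) the 16-byte counter block built above is
	 * laid out as
	 *
	 *	| 4-byte nonce | 8-byte IV | 4-byte counter = 1 |
	 *
	 * i.e. CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE + 4 = IV (16).
	 */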
	if (unlikely(bytes == 0)) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
fallback:
		atomic_inc(&adap->chcr_stats.fallback);
		err = chcr_cipher_fallback(ablkctx->sw_cipher, req,
					   subtype ==
					   CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 ?
					   reqctx->iv : req->iv,
					   op_type);
		goto error;
	}
	reqctx->op = op_type;
	reqctx->srcsg = req->src;
	reqctx->dstsg = req->dst;
	reqctx->src_ofst = 0;
	reqctx->dst_ofst = 0;
	wrparam.qid = qid;
	wrparam.req = req;
	wrparam.bytes = bytes;
	*skb = create_cipher_wr(&wrparam);
	if (IS_ERR(*skb)) {
		err = PTR_ERR(*skb);
		goto unmap;
	}
	reqctx->processed = bytes;
	reqctx->last_req_len = bytes;
	reqctx->partial_req = !!(req->cryptlen - reqctx->processed);

	return 0;
unmap:
	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
error:
	return err;
}

static int chcr_aes_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct chcr_dev *dev = c_ctx(tfm)->dev;
	struct sk_buff *skb = NULL;
	int err;
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
	struct chcr_context *ctx = c_ctx(tfm);
	unsigned int cpu;

	cpu = get_cpu();
	reqctx->txqidx = cpu % ctx->ntxq;
	reqctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	err = chcr_inc_wrcount(dev);
	if (err)
		return -ENXIO;
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    reqctx->txqidx) &&
		     (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
		err = -ENOSPC;
		goto error;
	}

	err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
			     &skb, CHCR_ENCRYPT_OP);
	if (err || !skb)
		return err;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
	chcr_send_wr(skb);
	if (get_cryptoalg_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
	    CRYPTO_TFM_REQ_MAY_SLEEP) {
		reqctx->partial_req = 1;
		wait_for_completion(&ctx->cbc_aes_aio_done);
	}
	return -EINPROGRESS;
error:
	chcr_dec_wrcount(dev);
	return err;
}

static int chcr_aes_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
	struct chcr_dev *dev = c_ctx(tfm)->dev;
	struct sk_buff *skb = NULL;
	int err;
	struct chcr_context *ctx = c_ctx(tfm);
	unsigned int cpu;

	cpu = get_cpu();
	reqctx->txqidx = cpu % ctx->ntxq;
	reqctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	err = chcr_inc_wrcount(dev);
	if (err)
		return -ENXIO;

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    reqctx->txqidx) &&
		     (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))))
		return -ENOSPC;
	err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
			     &skb, CHCR_DECRYPT_OP);
	if (err || !skb)
		return err;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}

static int chcr_device_init(struct chcr_context *ctx)
{
	struct uld_ctx *u_ctx = NULL;
	int txq_perchan, ntxq;
	int err = 0, rxq_perchan;

	if (!ctx->dev) {
		u_ctx = assign_chcr_device();
		if (!u_ctx) {
			err = -ENXIO;
			pr_err("chcr device assignment failed\n");
			goto out;
		}
		ctx->dev = &u_ctx->dev;
		ntxq = u_ctx->lldi.ntxq;
		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
		txq_perchan = ntxq / u_ctx->lldi.nchan;
		ctx->ntxq = ntxq;
		ctx->nrxq = u_ctx->lldi.nrxq;
		ctx->rxq_perchan = rxq_perchan;
		ctx->txq_perchan = txq_perchan;
	}
out:
	return err;
}
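
/*
 * Illustrative note (not from the original source): the per-channel
 * queue counts simply divide the ULD's queues evenly across channels.
 * With, say, lldi.ntxq = 8, lldi.nrxq = 8 and lldi.nchan = 2, each
 * channel gets txq_perchan = rxq_perchan = 4, and a queue index is
 * mapped to its channel by qidx / *_perchan elsewhere in this file.
 */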

static int chcr_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	ablkctx->sw_cipher = crypto_alloc_skcipher(alg->base.cra_name, 0,
						   CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ablkctx->sw_cipher)) {
		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
		return PTR_ERR(ablkctx->sw_cipher);
	}
	init_completion(&ctx->cbc_aes_aio_done);
	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
				    crypto_skcipher_reqsize(ablkctx->sw_cipher));

	return chcr_device_init(ctx);
}

static int chcr_rfc3686_init(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	/* RFC3686 initialises the IV counter value to 1, so
	 * rfc3686(ctr(aes)) cannot be used as the fallback in
	 * chcr_handle_cipher_resp(); plain ctr(aes) is used instead.
	 */
	ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
						   CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ablkctx->sw_cipher)) {
		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
		return PTR_ERR(ablkctx->sw_cipher);
	}
	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
				    crypto_skcipher_reqsize(ablkctx->sw_cipher));
	return chcr_device_init(ctx);
}

static void chcr_exit_tfm(struct crypto_skcipher *tfm)
{
	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	crypto_free_skcipher(ablkctx->sw_cipher);
}

static int get_alg_config(struct algo_param *params,
			  unsigned int auth_size)
{
	switch (auth_size) {
	case SHA1_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
		params->result_size = SHA1_DIGEST_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	default:
		pr_err("ERROR, unsupported digest size\n");
		return -EINVAL;
	}
	return 0;
}
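
/*
 * Illustrative note (not from the original source): for SHA-224 and
 * SHA-384 the result_size above is deliberately the size of the full
 * SHA-256/SHA-512 state, presumably because the hardware returns the
 * untruncated digest state; truncation to the nominal digest length
 * happens when the result is copied out to the caller.
 */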

static inline void chcr_free_shash(struct crypto_shash *base_hash)
{
	crypto_free_shash(base_hash);
}

/**
 * create_hash_wr - Create hash work request
 * @req: hash request
 * @param: hash work request parameters
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) static struct sk_buff *create_hash_wr(struct ahash_request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) struct hash_wr_param *param)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) struct chcr_context *ctx = h_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) struct sk_buff *skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) struct uld_ctx *u_ctx = ULD_CTX(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) struct chcr_wr *chcr_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) struct ulptx_sgl *ulptx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) unsigned int nents = 0, transhdr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) unsigned int temp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) GFP_ATOMIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) struct adapter *adap = padap(h_ctx(tfm)->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
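^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) /* Send the payload as immediate data if the header plus buffered and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) * SG bytes fit in a single work request; otherwise build an ULPTX SGL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) * and let the hardware DMA the source.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) */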
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) param->sg_len) <= SGE_MAX_WR_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) nents += param->bfr_len ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) param->sg_len, 16) : (sgl_len(nents) * 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) transhdr_len = roundup(transhdr_len, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) skb = alloc_skb(transhdr_len, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) chcr_req = __skb_put_zero(skb, transhdr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) chcr_req->sec_cpl.op_ivinsrtofst =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) chcr_req->sec_cpl.aadstart_cipherstop_hi =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) chcr_req->sec_cpl.cipherstop_lo_authinsert =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) chcr_req->sec_cpl.seqno_numivs =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) param->opad_needed, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) chcr_req->sec_cpl.ivgen_hdrlen =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) param->alg_prm.result_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)
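^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) /* The key context carries the running digest state; for the final
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) * HMAC pass, H(opad) is appended after it, at a 32-byte offset for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) * SHA-1/224/256 state and CHCR_HASH_MAX_DIGEST_SIZE otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) */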
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) if (param->opad_needed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) memcpy(chcr_req->key_ctx.key +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) ((param->alg_prm.result_size <= 32) ? 32 :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) CHCR_HASH_MAX_DIGEST_SIZE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) hmacctx->opad, param->alg_prm.result_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) param->alg_prm.mk_size, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) param->opad_needed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) ((param->kctx_len +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) sizeof(chcr_req->key_ctx)) >> 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) DUMMY_BYTES);
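^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) /* A partially filled block, if any, is DMA-mapped on its own;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) * chcr_add_hash_src_ent() places it ahead of the scatterlist data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) */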
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) if (param->bfr_len != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) req_ctx->hctx_wr.dma_addr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) param->bfr_len, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) req_ctx->hctx_wr.dma_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) req_ctx->hctx_wr.dma_len = param->bfr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) req_ctx->hctx_wr.dma_addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) chcr_add_hash_src_ent(req, ulptx, param);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) /* Request up to the maximum WR size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) (param->sg_len + param->bfr_len) : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) atomic_inc(&adap->chcr_stats.digest_rqst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) param->hash_size, transhdr_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) temp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) req_ctx->hctx_wr.skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) return ERR_PTR(error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) static int chcr_ahash_update(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) struct chcr_context *ctx = h_ctx(rtfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) struct chcr_dev *dev = h_ctx(rtfm)->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) u8 remainder = 0, bs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) unsigned int nbytes = req->nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) struct hash_wr_param params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) unsigned int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) cpu = get_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) req_ctx->txqidx = cpu % ctx->ntxq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) req_ctx->rxqidx = cpu % ctx->nrxq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) put_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682)
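^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) /* Only whole blocks can be hashed before the final request: carve off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) * a block-aligned chunk now and stash the remainder in reqbfr for a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) * later update or final.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) */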
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) if (nbytes + req_ctx->reqlen >= bs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) remainder = (nbytes + req_ctx->reqlen) % bs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) nbytes = nbytes + req_ctx->reqlen - remainder;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) + req_ctx->reqlen, nbytes, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) req_ctx->reqlen += nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) error = chcr_inc_wrcount(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) /* Detach state for CHCR means lldi or padap is freed. Increasing the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) * inflight count for the dev guarantees that lldi and padap stay valid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) req_ctx->txqidx) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) error = -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) chcr_init_hctx_per_wr(req_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) params.kctx_len = roundup(params.alg_prm.result_size, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) HASH_SPACE_LEFT(params.kctx_len), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) if (params.sg_len > req->nbytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) params.sg_len = req->nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) req_ctx->reqlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) params.opad_needed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) params.more = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) params.last = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) params.bfr_len = req_ctx->reqlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) params.scmd1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) req_ctx->hctx_wr.srcsg = req->src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) params.hash_size = params.alg_prm.result_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) req_ctx->data_len += params.sg_len + params.bfr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) skb = create_hash_wr(req, &params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) if (IS_ERR(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) error = PTR_ERR(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) goto unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) req_ctx->hctx_wr.processed += params.sg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) if (remainder) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) /* Swap buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) swap(req_ctx->reqbfr, req_ctx->skbfr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) sg_pcopy_to_buffer(req->src, sg_nents(req->src),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) req_ctx->reqbfr, remainder, req->nbytes -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) remainder);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) req_ctx->reqlen = remainder;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) skb->dev = u_ctx->lldi.ports[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) chcr_send_wr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) return -EINPROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) unmap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) chcr_dec_wrcount(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753)
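^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) /* Build the final padding block by hand: a 0x80 byte, zeroes, then the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) * total message length in bits, big-endian, in the last eight bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) * (offset 56 for 64-byte blocks, 120 for 128-byte ones), matching
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) * SHA-1/SHA-2 padding. scmd1 is the processed byte count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) */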
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) memset(bfr_ptr, 0, bs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) *bfr_ptr = 0x80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) if (bs == 64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) *(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) *(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) static int chcr_ahash_final(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) struct chcr_dev *dev = h_ctx(rtfm)->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) struct hash_wr_param params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) struct chcr_context *ctx = h_ctx(rtfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) unsigned int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) cpu = get_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) req_ctx->txqidx = cpu % ctx->ntxq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) req_ctx->rxqidx = cpu % ctx->nrxq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) put_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) error = chcr_inc_wrcount(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) chcr_init_hctx_per_wr(req_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) if (is_hmac(crypto_ahash_tfm(rtfm)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) params.opad_needed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) params.opad_needed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) params.sg_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) req_ctx->hctx_wr.isfinal = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) params.kctx_len = roundup(params.alg_prm.result_size, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) if (is_hmac(crypto_ahash_tfm(rtfm))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) params.opad_needed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) params.kctx_len *= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) params.opad_needed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) req_ctx->hctx_wr.result = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) params.bfr_len = req_ctx->reqlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) req_ctx->data_len += params.bfr_len + params.sg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) req_ctx->hctx_wr.srcsg = req->src;
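^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) /* Nothing is buffered, so feed the hardware a software-built padding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) * block and run it as an intermediate (more=1) pass; the length is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) * already encoded in the block, hence scmd1 stays 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) */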
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) if (req_ctx->reqlen == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) params.last = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) params.more = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) params.scmd1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) params.bfr_len = bs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) params.scmd1 = req_ctx->data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) params.last = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) params.more = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) params.hash_size = crypto_ahash_digestsize(rtfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) skb = create_hash_wr(req, &params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) if (IS_ERR(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) error = PTR_ERR(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) req_ctx->reqlen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) skb->dev = u_ctx->lldi.ports[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) chcr_send_wr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) return -EINPROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) chcr_dec_wrcount(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) static int chcr_ahash_finup(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) struct chcr_dev *dev = h_ctx(rtfm)->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) struct chcr_context *ctx = h_ctx(rtfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) struct hash_wr_param params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) u8 bs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) unsigned int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) cpu = get_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) req_ctx->txqidx = cpu % ctx->ntxq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) req_ctx->rxqidx = cpu % ctx->nrxq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) put_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) error = chcr_inc_wrcount(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) req_ctx->txqidx) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) error = -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) chcr_init_hctx_per_wr(req_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) params.kctx_len = roundup(params.alg_prm.result_size, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) if (is_hmac(crypto_ahash_tfm(rtfm))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) params.kctx_len *= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) params.opad_needed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) params.opad_needed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) HASH_SPACE_LEFT(params.kctx_len), 0);
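^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) /* If the whole source cannot fit in one WR, send a block-aligned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) * intermediate hash first (without opad, returning partial state) and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) * let chcr_ahash_continue() stream the rest on completion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) */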
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) if (params.sg_len < req->nbytes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) if (is_hmac(crypto_ahash_tfm(rtfm))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) params.kctx_len /= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) params.opad_needed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) params.last = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) params.more = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) - req_ctx->reqlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) params.hash_size = params.alg_prm.result_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) params.scmd1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) params.last = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) params.more = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) params.sg_len = req->nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) params.hash_size = crypto_ahash_digestsize(rtfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) params.sg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) params.bfr_len = req_ctx->reqlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) req_ctx->data_len += params.bfr_len + params.sg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) req_ctx->hctx_wr.result = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) req_ctx->hctx_wr.srcsg = req->src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) if ((req_ctx->reqlen + req->nbytes) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) params.last = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) params.more = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) params.scmd1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) params.bfr_len = bs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) skb = create_hash_wr(req, &params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) if (IS_ERR(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) error = PTR_ERR(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) goto unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) req_ctx->reqlen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) req_ctx->hctx_wr.processed += params.sg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) skb->dev = u_ctx->lldi.ports[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) chcr_send_wr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) return -EINPROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) unmap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) chcr_dec_wrcount(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) static int chcr_ahash_digest(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) struct chcr_dev *dev = h_ctx(rtfm)->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) struct chcr_context *ctx = h_ctx(rtfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) struct hash_wr_param params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) u8 bs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) unsigned int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) cpu = get_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) req_ctx->txqidx = cpu % ctx->ntxq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) req_ctx->rxqidx = cpu % ctx->nrxq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) put_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946)
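^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) /* digest() is init + update + final in one shot: reset the request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) * context so no stale partial state is carried over.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) */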
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) rtfm->init(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) error = chcr_inc_wrcount(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) req_ctx->txqidx) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) error = -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) chcr_init_hctx_per_wr(req_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) params.kctx_len = roundup(params.alg_prm.result_size, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) if (is_hmac(crypto_ahash_tfm(rtfm))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) params.kctx_len *= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) params.opad_needed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) params.opad_needed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) HASH_SPACE_LEFT(params.kctx_len), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) if (params.sg_len < req->nbytes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) if (is_hmac(crypto_ahash_tfm(rtfm))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) params.kctx_len /= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) params.opad_needed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) params.last = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) params.more = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) params.scmd1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) params.sg_len = rounddown(params.sg_len, bs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) params.hash_size = params.alg_prm.result_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) params.sg_len = req->nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) params.hash_size = crypto_ahash_digestsize(rtfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) params.last = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) params.more = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) params.scmd1 = req->nbytes + req_ctx->data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) params.bfr_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) req_ctx->hctx_wr.result = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) req_ctx->hctx_wr.srcsg = req->src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) req_ctx->data_len += params.bfr_len + params.sg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999)
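^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) /* Zero-length message: hash one software-built padding block instead. */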
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) if (req->nbytes == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) params.more = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) params.bfr_len = bs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) skb = create_hash_wr(req, &params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) if (IS_ERR(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) error = PTR_ERR(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) goto unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) req_ctx->hctx_wr.processed += params.sg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) skb->dev = u_ctx->lldi.ports[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) chcr_send_wr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) return -EINPROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) unmap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) chcr_dec_wrcount(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) static int chcr_ahash_continue(struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) struct chcr_context *ctx = h_ctx(rtfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) struct uld_ctx *u_ctx = ULD_CTX(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) struct hash_wr_param params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) u8 bs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) unsigned int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) cpu = get_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) reqctx->txqidx = cpu % ctx->ntxq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) reqctx->rxqidx = cpu % ctx->nrxq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) put_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) params.kctx_len = roundup(params.alg_prm.result_size, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) if (is_hmac(crypto_ahash_tfm(rtfm))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) params.kctx_len *= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) params.opad_needed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) params.opad_needed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) }
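^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) /* Resume from hctx_wr->src_ofst: each pass hashes as much of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) * remaining scatterlist as fits in one WR, carrying the partial digest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) * forward via the key context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) */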
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) HASH_SPACE_LEFT(params.kctx_len),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) hctx_wr->src_ofst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) if ((params.sg_len + hctx_wr->processed) > req->nbytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) params.sg_len = req->nbytes - hctx_wr->processed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) if (!hctx_wr->result ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) if (is_hmac(crypto_ahash_tfm(rtfm))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) params.kctx_len /= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) params.opad_needed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) params.last = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) params.more = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) params.sg_len = rounddown(params.sg_len, bs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) params.hash_size = params.alg_prm.result_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) params.scmd1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) params.last = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) params.more = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) params.hash_size = crypto_ahash_digestsize(rtfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) params.scmd1 = reqctx->data_len + params.sg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) params.bfr_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) reqctx->data_len += params.sg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) skb = create_hash_wr(req, &params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) if (IS_ERR(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) error = PTR_ERR(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) hctx_wr->processed += params.sg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) skb->dev = u_ctx->lldi.ports[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) chcr_send_wr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) static inline void chcr_handle_ahash_resp(struct ahash_request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) unsigned char *input,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) int digestsize, updated_digestsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) struct chcr_dev *dev = h_ctx(tfm)->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) if (input == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) updated_digestsize = digestsize;
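^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) /* SHA-224/384 are truncated variants: the hardware hands back the full
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) * SHA-256/512 internal state, so partial results use the parent size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) */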
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) if (digestsize == SHA224_DIGEST_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) updated_digestsize = SHA256_DIGEST_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) else if (digestsize == SHA384_DIGEST_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) updated_digestsize = SHA512_DIGEST_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) if (hctx_wr->dma_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) hctx_wr->dma_len, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) hctx_wr->dma_addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) req->nbytes)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) if (hctx_wr->result == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) hctx_wr->result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) digestsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) memcpy(reqctx->partial_hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) input + sizeof(struct cpl_fw6_pld),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) updated_digestsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) goto unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) updated_digestsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) err = chcr_ahash_continue(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) goto unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) unmap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) if (hctx_wr->is_sg_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) chcr_dec_wrcount(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) req->base.complete(&req->base, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) * chcr_handle_resp - Dispatch completion handling for a request and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) * unmap the DMA buffers associated with it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) * @req: crypto request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) struct crypto_tfm *tfm = req->tfm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) struct chcr_context *ctx = crypto_tfm_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) struct adapter *adap = padap(ctx->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154)
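^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) /* Fan out by algorithm type; only the AEAD handler can rewrite err. */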
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) case CRYPTO_ALG_TYPE_AEAD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) case CRYPTO_ALG_TYPE_SKCIPHER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) chcr_handle_cipher_resp(skcipher_request_cast(req),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) input, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) case CRYPTO_ALG_TYPE_AHASH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) atomic_inc(&adap->chcr_stats.complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) static int chcr_ahash_export(struct ahash_request *areq, void *out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) struct chcr_ahash_req_ctx *state = out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174)
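^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) /* Export only the software-visible state (buffered bytes, running
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) * length, partial digest); per-WR bookkeeping is reinitialised rather
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) * than carried across export/import.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) */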
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) state->reqlen = req_ctx->reqlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) state->data_len = req_ctx->data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) memcpy(state->partial_hash, req_ctx->partial_hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) CHCR_HASH_MAX_DIGEST_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) chcr_init_hctx_per_wr(state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) static int chcr_ahash_import(struct ahash_request *areq, const void *in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) req_ctx->reqlen = state->reqlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) req_ctx->data_len = state->data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) req_ctx->reqbfr = req_ctx->bfr1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) req_ctx->skbfr = req_ctx->bfr2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) memcpy(req_ctx->partial_hash, state->partial_hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) CHCR_HASH_MAX_DIGEST_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) chcr_init_hctx_per_wr(req_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) unsigned int keylen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) unsigned int digestsize = crypto_ahash_digestsize(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) unsigned int i, err = 0, updated_digestsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) /* Use the key to calculate the ipad and opad. ipad will be sent with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) * the first request's data. opad will be sent with the final hash
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) * result. ipad is stored in hmacctx->ipad and opad in hmacctx->opad.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) shash->tfm = hmacctx->base_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) if (keylen > bs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) err = crypto_shash_digest(shash, key, keylen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) hmacctx->ipad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) keylen = digestsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) memcpy(hmacctx->ipad, key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) memset(hmacctx->ipad + keylen, 0, bs - keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) memcpy(hmacctx->opad, hmacctx->ipad, bs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226)
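^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) /* XOR in the standard HMAC constants word by word: 0x36 bytes for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) * ipad, 0x5c for the opad (IPAD_DATA/OPAD_DATA replicate those bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) * across an unsigned int).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) */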
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) for (i = 0; i < bs / sizeof(int); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) *((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) *((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) updated_digestsize = digestsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) if (digestsize == SHA224_DIGEST_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) updated_digestsize = SHA256_DIGEST_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) else if (digestsize == SHA384_DIGEST_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) updated_digestsize = SHA512_DIGEST_SIZE;
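^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) /* Precompute H(ipad) and H(opad) as single-block partial hashes so the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) * data path never needs the raw key; chcr_change_order() converts the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) * state to the byte order the hardware expects.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) */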
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) err = chcr_compute_partial_hash(shash, hmacctx->ipad,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) hmacctx->ipad, digestsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) chcr_change_order(hmacctx->ipad, updated_digestsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) err = chcr_compute_partial_hash(shash, hmacctx->opad,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) hmacctx->opad, digestsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) chcr_change_order(hmacctx->opad, updated_digestsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) unsigned int key_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) unsigned short context_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) err = chcr_cipher_fallback_setkey(cipher, key, key_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) goto badkey_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) memcpy(ablkctx->key, key, key_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) ablkctx->enckey_len = key_len;
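^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) /* rrkey holds the decrypt-side (reverse-round) key; the shift converts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) * the combined XTS key length in bytes to the bit width of one key.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) */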
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) /* Both keys for xts must be aligned to a 16 byte boundary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) * by padding with zeros, so a 24 byte key gets 8 bytes of zero padding.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) if (key_len == 48) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) + 16) >> 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) memmove(ablkctx->key + 32, ablkctx->key + 24, 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) memset(ablkctx->key + 24, 0, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) memset(ablkctx->key + 56, 0, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) ablkctx->enckey_len = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) ablkctx->key_ctx_hdr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) FILL_KEY_CTX_HDR(CHCR_KEYCTX_CIPHER_KEY_SIZE_192,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) CHCR_KEYCTX_NO_KEY, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 0, context_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) ablkctx->key_ctx_hdr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) CHCR_KEYCTX_NO_KEY, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 0, context_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) badkey_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) ablkctx->enckey_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) static int chcr_sha_init(struct ahash_request *areq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) int digestsize = crypto_ahash_digestsize(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) req_ctx->data_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) req_ctx->reqlen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) req_ctx->reqbfr = req_ctx->bfr1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) req_ctx->skbfr = req_ctx->bfr2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) copy_hash_init_values(req_ctx->partial_hash, digestsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) static int chcr_sha_cra_init(struct crypto_tfm *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) sizeof(struct chcr_ahash_req_ctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) return chcr_device_init(crypto_tfm_ctx(tfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) static int chcr_hmac_init(struct ahash_request *areq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) unsigned int digestsize = crypto_ahash_digestsize(rtfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) chcr_sha_init(areq);
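^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) /* The ipad block is already folded into the partial hash, so the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) * running length starts at one block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) */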
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) req_ctx->data_len = bs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) if (is_hmac(crypto_ahash_tfm(rtfm))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) if (digestsize == SHA224_DIGEST_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) memcpy(req_ctx->partial_hash, hmacctx->ipad,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) SHA256_DIGEST_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) else if (digestsize == SHA384_DIGEST_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) memcpy(req_ctx->partial_hash, hmacctx->ipad,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) SHA512_DIGEST_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) memcpy(req_ctx->partial_hash, hmacctx->ipad,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) digestsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342)
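/*
 * Allocate the software shash used to precompute the ipad/opad state
 * for this HMAC transform, then bind the transform to a device.
 */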
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) struct chcr_context *ctx = crypto_tfm_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) unsigned int digestsize =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) sizeof(struct chcr_ahash_req_ctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) hmacctx->base_hash = chcr_alloc_shash(digestsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) if (IS_ERR(hmacctx->base_hash))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) return PTR_ERR(hmacctx->base_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) return chcr_device_init(crypto_tfm_ctx(tfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) struct chcr_context *ctx = crypto_tfm_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) if (hmacctx->base_hash) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) chcr_free_shash(hmacctx->base_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) hmacctx->base_hash = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) inline void chcr_aead_common_exit(struct aead_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) struct crypto_aead *tfm = crypto_aead_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377)
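/*
 * Common AEAD request setup: reject requests with no key set or, on
 * decryption, with a ciphertext shorter than the tag; place the CCM
 * scratch pad directly after the IV when a B0 block is needed; and
 * DMA-map the request buffers.
 */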
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) static int chcr_aead_common_init(struct aead_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) struct crypto_aead *tfm = crypto_aead_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) unsigned int authsize = crypto_aead_authsize(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) int error = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) /* validate key size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) if (aeadctx->enckey_len == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) if (reqctx->op && req->cryptlen < authsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) if (reqctx->b0_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) reqctx->scratch_pad = reqctx->iv + IV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) reqctx->scratch_pad = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) reqctx->op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407)
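/*
 * Decide whether the request must be punted to the software fallback:
 * zero-length payloads, destination lists exceeding the DSGL limit,
 * AAD larger than the hardware allows or a work request longer than
 * SGE_MAX_WR_LEN cannot be handled inline.
 */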
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) int aadmax, int wrlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) unsigned short op_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) dst_nents > MAX_DSGL_ENT ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) (req->assoclen > aadmax) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) (wrlen > SGE_MAX_WR_LEN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421)
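/*
 * Hand the request over to the software AEAD implementation, reusing
 * the request context as the subrequest and preserving the caller's
 * completion callback.
 */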
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) struct crypto_aead *tfm = crypto_aead_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) struct aead_request *subreq = aead_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) aead_request_set_tfm(subreq, aeadctx->sw_cipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) aead_request_set_callback(subreq, req->base.flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) req->base.complete, req->base.data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) req->iv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) aead_request_set_ad(subreq, req->assoclen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) return op_type ? crypto_aead_decrypt(subreq) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) crypto_aead_encrypt(subreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437)
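/*
 * Build the work request for AUTHENC (cipher plus hash) transforms:
 * size the source and destination gather lists, choose between
 * immediate data and an SGL based on the resulting work-request
 * length, then fill in the SEC_CPL header, key context, IV and
 * source/destination entries.
 */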
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) static struct sk_buff *create_authenc_wr(struct aead_request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) unsigned short qid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) int size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) struct crypto_aead *tfm = crypto_aead_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) struct chcr_context *ctx = a_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) struct uld_ctx *u_ctx = ULD_CTX(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) struct sk_buff *skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) struct chcr_wr *chcr_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) struct cpl_rx_phys_dsgl *phys_cpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) struct ulptx_sgl *ulptx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) unsigned int transhdr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) unsigned int kctx_len = 0, dnents, snents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) unsigned int authsize = crypto_aead_authsize(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) int error = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) u8 *ivptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) int null = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) GFP_ATOMIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) struct adapter *adap = padap(ctx->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) if (req->cryptlen == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) reqctx->b0_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) error = chcr_aead_common_init(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) return ERR_PTR(error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) null = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) dnents += MIN_AUTH_SG; // For IV
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) CHCR_SRC_SG_SIZE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) dst_size = get_space_for_phys_dsgl(dnents);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) kctx_len = (KEY_CONTEXT_CTX_LEN_G(ntohl(aeadctx->key_ctx_hdr)) << 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) - sizeof(chcr_req->key_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) SGE_MAX_WR_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) : (sgl_len(snents) * 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) transhdr_len += temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) transhdr_len = roundup(transhdr_len, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) transhdr_len, reqctx->op)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) atomic_inc(&adap->chcr_stats.fallback);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) chcr_aead_common_exit(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) skb = alloc_skb(transhdr_len, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) chcr_req = __skb_put_zero(skb, transhdr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) * Input order is AAD, IV and payload, where the IV is included as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) * part of the authdata. All other fields are filled according to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) * the hardware spec.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) chcr_req->sec_cpl.op_ivinsrtofst =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) null ? 0 : 1 + IV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) null ? 0 : IV + req->assoclen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) req->assoclen + IV + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) (temp & 0x1F0) >> 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) temp & 0xF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) null ? 0 : req->assoclen + IV + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) temp, temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) (reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) temp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) actx->auth_mode, aeadctx->hmac_ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) IV >> 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) 0, 0, dst_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) if (reqctx->op == CHCR_ENCRYPT_OP ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) memcpy(chcr_req->key_ctx.key, aeadctx->key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) aeadctx->enckey_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) aeadctx->enckey_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) ivptr = (u8 *)(phys_cpl + 1) + dst_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) ulptx = (struct ulptx_sgl *)(ivptr + IV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) CTR_RFC3686_IV_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) *(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) memcpy(ivptr, req->iv, IV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) chcr_add_aead_dst_ent(req, phys_cpl, qid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) chcr_add_aead_src_ent(req, ulptx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) atomic_inc(&adap->chcr_stats.cipher_rqst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) transhdr_len, temp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) reqctx->skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) chcr_aead_common_exit(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) return ERR_PTR(error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579)
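/*
 * DMA-map the IV (plus the CCM B0 block when present) and the data
 * scatterlists: in-place requests use a single bidirectional mapping,
 * out-of-place requests map src and dst separately.
 */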
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) int chcr_aead_dma_map(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) struct aead_request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) unsigned short op_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) struct crypto_aead *tfm = crypto_aead_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) unsigned int authsize = crypto_aead_authsize(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) int src_len, dst_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) /* Calculate and handle the src and dst sg lengths separately
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) * for in-place and out-of-place operations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) if (req->src == req->dst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) src_len = req->assoclen + req->cryptlen + (op_type ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) 0 : authsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) dst_len = src_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) src_len = req->assoclen + req->cryptlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) dst_len = req->assoclen + req->cryptlen + (op_type ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) -authsize : authsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) if (!req->cryptlen || !src_len || !dst_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) if (dma_mapping_error(dev, reqctx->iv_dma))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) if (reqctx->b0_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) reqctx->b0_dma = reqctx->iv_dma + IV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) reqctx->b0_dma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) if (req->src == req->dst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) error = dma_map_sg(dev, req->src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) sg_nents_for_len(req->src, src_len),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) if (!error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) error = dma_map_sg(dev, req->src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) sg_nents_for_len(req->src, src_len),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) if (!error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) error = dma_map_sg(dev, req->dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) sg_nents_for_len(req->dst, dst_len),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) if (!error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) dma_unmap_sg(dev, req->src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) sg_nents_for_len(req->src, src_len),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len), DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641)
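/* Undo the mappings established by chcr_aead_dma_map(). */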
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) void chcr_aead_dma_unmap(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) struct aead_request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) unsigned short op_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) struct crypto_aead *tfm = crypto_aead_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) unsigned int authsize = crypto_aead_authsize(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) int src_len, dst_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) /* Calculate and handle the src and dst sg lengths separately
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) * for in-place and out-of-place operations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) if (req->src == req->dst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) src_len = req->assoclen + req->cryptlen + (op_type ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) 0 : authsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) dst_len = src_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) src_len = req->assoclen + req->cryptlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) dst_len = req->assoclen + req->cryptlen + (op_type ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) -authsize : authsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) if (!req->cryptlen || !src_len || !dst_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) if (req->src == req->dst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) dma_unmap_sg(dev, req->src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) sg_nents_for_len(req->src, src_len),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) dma_unmap_sg(dev, req->src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) sg_nents_for_len(req->src, src_len),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) dma_unmap_sg(dev, req->dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) sg_nents_for_len(req->dst, dst_len),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682)
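/*
 * Emit the ULPTX source entries for an AEAD request: immediate
 * requests copy B0 (if any) and the AAD plus payload directly into
 * the work request, otherwise the data is described by an SGL walk.
 */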
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) void chcr_add_aead_src_ent(struct aead_request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) struct ulptx_sgl *ulptx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) struct ulptx_walk ulp_walk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) if (reqctx->imm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) u8 *buf = (u8 *)ulptx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) if (reqctx->b0_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) buf += reqctx->b0_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) sg_pcopy_to_buffer(req->src, sg_nents(req->src),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) buf, req->cryptlen + req->assoclen, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) ulptx_walk_init(&ulp_walk, ulptx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) if (reqctx->b0_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) reqctx->b0_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) req->assoclen, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) ulptx_walk_end(&ulp_walk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708)
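/*
 * Emit the destination physical DSGL: the IV (and B0) page first,
 * followed by the destination scatterlist trimmed or extended by the
 * tag size depending on the direction of the operation.
 */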
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) void chcr_add_aead_dst_ent(struct aead_request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) struct cpl_rx_phys_dsgl *phys_cpl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) unsigned short qid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) struct crypto_aead *tfm = crypto_aead_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) struct dsgl_walk dsgl_walk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) unsigned int authsize = crypto_aead_authsize(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) struct chcr_context *ctx = a_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) struct uld_ctx *u_ctx = ULD_CTX(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) u32 temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) dsgl_walk_init(&dsgl_walk, phys_cpl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) temp = req->assoclen + req->cryptlen +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) (reqctx->op ? -authsize : authsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730)
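/*
 * Emit cipher source entries: the IV is always copied inline ahead of
 * the data, followed either by an immediate copy of the payload or by
 * an SGL walk that records where the next pass should resume.
 */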
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) void chcr_add_cipher_src_ent(struct skcipher_request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) void *ulptx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) struct cipher_wr_param *wrparam)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) struct ulptx_walk ulp_walk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) u8 *buf = ulptx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) memcpy(buf, reqctx->iv, IV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) buf += IV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) if (reqctx->imm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) sg_pcopy_to_buffer(req->src, sg_nents(req->src),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) buf, wrparam->bytes, reqctx->processed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) reqctx->src_ofst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) reqctx->srcsg = ulp_walk.last_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) reqctx->src_ofst = ulp_walk.last_sg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) ulptx_walk_end(&ulp_walk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753)
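/*
 * Emit the cipher destination DSGL and remember the last scatterlist
 * position so that a multi-pass request can resume where it left off.
 */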
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) void chcr_add_cipher_dst_ent(struct skcipher_request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) struct cpl_rx_phys_dsgl *phys_cpl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) struct cipher_wr_param *wrparam,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) unsigned short qid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) struct chcr_context *ctx = c_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) struct uld_ctx *u_ctx = ULD_CTX(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) struct dsgl_walk dsgl_walk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) dsgl_walk_init(&dsgl_walk, phys_cpl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) reqctx->dst_ofst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) reqctx->dstsg = dsgl_walk.last_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) reqctx->dst_ofst = dsgl_walk.last_sg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774)
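/*
 * Emit hash source entries: any partially buffered data goes first
 * (copied inline for immediate requests, referenced as a DMA page
 * otherwise), followed by the scatterlist payload.
 */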
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) void chcr_add_hash_src_ent(struct ahash_request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) struct ulptx_sgl *ulptx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) struct hash_wr_param *param)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) struct ulptx_walk ulp_walk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) if (reqctx->hctx_wr.imm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) u8 *buf = (u8 *)ulptx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) if (param->bfr_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) memcpy(buf, reqctx->reqbfr, param->bfr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) buf += param->bfr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) sg_nents(reqctx->hctx_wr.srcsg), buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) param->sg_len, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) ulptx_walk_init(&ulp_walk, ulptx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) if (param->bfr_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) ulptx_walk_add_page(&ulp_walk, param->bfr_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) reqctx->hctx_wr.dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) param->sg_len, reqctx->hctx_wr.src_ofst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) ulptx_walk_end(&ulp_walk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805)
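/* DMA-map the hash source scatterlist for device reads. */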
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) int chcr_hash_dma_map(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) if (!req->nbytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) error = dma_map_sg(dev, req->src, sg_nents(req->src),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) if (!error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) req_ctx->hctx_wr.is_sg_map = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) void chcr_hash_dma_unmap(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) struct ahash_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) if (!req->nbytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) dma_unmap_sg(dev, req->src, sg_nents(req->src),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) req_ctx->hctx_wr.is_sg_map = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835)
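/*
 * DMA-map the cipher scatterlists: a single bidirectional mapping for
 * in-place requests, separate to-device and from-device mappings
 * otherwise.
 */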
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) int chcr_cipher_dma_map(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) struct skcipher_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) if (req->src == req->dst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) error = dma_map_sg(dev, req->src, sg_nents(req->src),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) if (!error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) error = dma_map_sg(dev, req->src, sg_nents(req->src),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) if (!error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) if (!error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) dma_unmap_sg(dev, req->src, sg_nents(req->src),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) void chcr_cipher_dma_unmap(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) struct skcipher_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) if (req->src == req->dst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) dma_unmap_sg(dev, req->src, sg_nents(req->src),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) dma_unmap_sg(dev, req->src, sg_nents(req->src),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878)
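/*
 * Encode the message length big-endian into the last csize bytes of
 * the CCM length field, returning -EOVERFLOW if it does not fit.
 */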
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) static int set_msg_len(u8 *block, unsigned int msglen, int csize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) __be32 data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) memset(block, 0, csize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) block += csize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) if (csize >= 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) csize = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) else if (msglen > (unsigned int)(1 << (8 * csize)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) return -EOVERFLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) data = cpu_to_be32(msglen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896)
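/*
 * Construct the CCM B0 block from the counter block: keep the L'
 * field from the IV, fold the tag length into flag bits 3-5, set the
 * Adata bit when AAD is present and append the encoded message
 * length.
 */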
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) static int generate_b0(struct aead_request *req, u8 *ivptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) unsigned short op_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) unsigned int l, lp, m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) struct crypto_aead *aead = crypto_aead_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) u8 *b0 = reqctx->scratch_pad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) m = crypto_aead_authsize(aead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) memcpy(b0, ivptr, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) lp = b0[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) l = lp + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) /* encode the tag length m as (m - 2) / 2 in flag bits 3-5 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) *b0 |= (8 * ((m - 2) / 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) /* set adata, bit 6, if associated data is used */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) if (req->assoclen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) *b0 |= 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) rc = set_msg_len(b0 + 16 - l,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) (op_type == CHCR_DECRYPT_OP) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) req->cryptlen - m : req->cryptlen, l);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) static inline int crypto_ccm_check_iv(const u8 *iv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) /* 2 <= L <= 8, so 1 <= L' <= 7. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) if (iv[0] < 1 || iv[0] > 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934)
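/*
 * Lay out the CCM counter block: RFC 4309 rebuilds it from the 3-byte
 * salt and 8-byte per-request IV with L' = 3, plain CCM takes the
 * caller's 16-byte IV. The AAD length field and B0 block are written
 * to the scratch pad, after which the counter bytes are zeroed.
 */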
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) static int ccm_format_packet(struct aead_request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) u8 *ivptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) unsigned int sub_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) unsigned short op_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) unsigned int assoclen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) struct crypto_aead *tfm = crypto_aead_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) ivptr[0] = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) memcpy(ivptr + 1, &aeadctx->salt[0], 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) memcpy(ivptr + 4, req->iv, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) memset(ivptr + 12, 0, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) memcpy(ivptr, req->iv, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) if (assoclen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) put_unaligned_be16(assoclen, &reqctx->scratch_pad[16]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) rc = generate_b0(req, ivptr, op_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) /* zero the ctr value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962)
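/*
 * Fill the SEC_CPL header for CCM: the payload length and all offsets
 * are shifted by the B0 block (and AAD length field) that precede the
 * data, and the auth offsets depend on the direction of the
 * operation.
 */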
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) unsigned int dst_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) struct aead_request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) unsigned short op_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) struct crypto_aead *tfm = crypto_aead_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) struct chcr_context *ctx = a_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) struct uld_ctx *u_ctx = ULD_CTX(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) unsigned int ccm_xtra;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) unsigned int tag_offset = 0, auth_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) unsigned int assoclen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) assoclen = req->assoclen - 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) assoclen = req->assoclen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) ccm_xtra = CCM_B0_SIZE +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) ((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) auth_offset = req->cryptlen ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) (req->assoclen + IV + 1 + ccm_xtra) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) if (op_type == CHCR_DECRYPT_OP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) if (crypto_aead_authsize(tfm) != req->cryptlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) tag_offset = crypto_aead_authsize(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) auth_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) sec_cpl->pldlen =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) htonl(req->assoclen + IV + req->cryptlen + ccm_xtra);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) /* For CCM there will always be a B0 block, so the AAD start is always 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) 1 + IV, IV + assoclen + ccm_xtra,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) req->assoclen + IV + 1 + ccm_xtra, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) auth_offset, tag_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) (op_type == CHCR_ENCRYPT_OP) ? 0 :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) crypto_aead_authsize(tfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) (op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) cipher_mode, mac_mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) aeadctx->hmac_ctrl, IV >> 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) 0, dst_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018)
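/*
 * Sanity-check CCM inputs: plain CCM IVs must carry a valid L' field,
 * while RFC 4309 requests must supply 16 or 20 bytes of associated
 * data.
 */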
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) static int aead_ccm_validate_input(unsigned short op_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) struct aead_request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) struct chcr_aead_ctx *aeadctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) unsigned int sub_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) if (crypto_ccm_check_iv(req->iv)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) pr_err("CCM: IV check fails\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) if (req->assoclen != 16 && req->assoclen != 20) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) pr_err("RFC4309: Invalid AAD length %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) req->assoclen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038)
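/*
 * Build the work request for CCM and RFC 4309 transforms: account for
 * the extra B0 block and AAD length field, and load the AES key twice
 * in the key context (once for the cipher, once for the CBC-MAC).
 */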
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) unsigned short qid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) int size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) struct crypto_aead *tfm = crypto_aead_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) struct sk_buff *skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) struct chcr_wr *chcr_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) struct cpl_rx_phys_dsgl *phys_cpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) struct ulptx_sgl *ulptx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) unsigned int transhdr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) unsigned int dst_size = 0, kctx_len, dnents, temp, snents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) unsigned int sub_type, assoclen = req->assoclen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) unsigned int authsize = crypto_aead_authsize(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) int error = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) u8 *ivptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) GFP_ATOMIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) struct adapter *adap = padap(a_ctx(tfm)->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) sub_type = get_aead_subtype(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) assoclen -= 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) error = chcr_aead_common_init(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) return ERR_PTR(error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) + (reqctx->op ? -authsize : authsize),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) CHCR_DST_SG_SIZE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) dnents += MIN_CCM_SG; // For IV and B0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) dst_size = get_space_for_phys_dsgl(dnents);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) CHCR_SRC_SG_SIZE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) snents += MIN_CCM_SG; // For B0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) reqctx->b0_len) <= SGE_MAX_WR_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) reqctx->b0_len, 16) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) (sgl_len(snents) * 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) transhdr_len += temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) transhdr_len = roundup(transhdr_len, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) reqctx->b0_len, transhdr_len, reqctx->op)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) atomic_inc(&adap->chcr_stats.fallback);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) chcr_aead_common_exit(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) skb = alloc_skb(transhdr_len, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) chcr_req = __skb_put_zero(skb, transhdr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105)
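	/* CCM uses the same AES key for both the CTR cipher and the
	 * CBC-MAC, so the key context carries two copies of it, each
	 * padded to a 16-byte boundary (matching kctx_len above).
	 */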
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) aeadctx->key, aeadctx->enckey_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) ivptr = (u8 *)(phys_cpl + 1) + dst_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) ulptx = (struct ulptx_sgl *)(ivptr + IV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) goto dstmap_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) chcr_add_aead_dst_ent(req, phys_cpl, qid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) chcr_add_aead_src_ent(req, ulptx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) atomic_inc(&adap->chcr_stats.aead_rqst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) reqctx->b0_len) : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) transhdr_len, temp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) reqctx->skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) dstmap_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) chcr_aead_common_exit(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) return ERR_PTR(error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) static struct sk_buff *create_gcm_wr(struct aead_request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) unsigned short qid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) int size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) struct crypto_aead *tfm = crypto_aead_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) struct chcr_context *ctx = a_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) struct uld_ctx *u_ctx = ULD_CTX(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) struct sk_buff *skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) struct chcr_wr *chcr_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) struct cpl_rx_phys_dsgl *phys_cpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) struct ulptx_sgl *ulptx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) unsigned int transhdr_len, dnents = 0, snents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) unsigned int authsize = crypto_aead_authsize(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) int error = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) u8 *ivptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) GFP_ATOMIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) struct adapter *adap = padap(ctx->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) assoclen = req->assoclen - 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) reqctx->b0_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) error = chcr_aead_common_init(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) return ERR_PTR(error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) (reqctx->op ? -authsize : authsize),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) CHCR_DST_SG_SIZE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) CHCR_SRC_SG_SIZE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) dnents += MIN_GCM_SG; // For IV
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) dst_size = get_space_for_phys_dsgl(dnents);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) SGE_MAX_WR_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) (sgl_len(snents) * 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) transhdr_len += temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) transhdr_len = roundup(transhdr_len, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) transhdr_len, reqctx->op)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) atomic_inc(&adap->chcr_stats.fallback);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) chcr_aead_common_exit(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) skb = alloc_skb(transhdr_len, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) chcr_req = __skb_put_zero(skb, transhdr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) //Offset of tag from end
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) rx_channel_id, 2, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) chcr_req->sec_cpl.pldlen =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) htonl(req->assoclen + IV + req->cryptlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) assoclen ? 1 + IV : 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) assoclen ? IV + assoclen : 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) req->assoclen + IV + 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) chcr_req->sec_cpl.cipherstop_lo_authinsert =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) temp, temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) chcr_req->sec_cpl.seqno_numivs =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) CHCR_ENCRYPT_OP) ? 1 : 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) CHCR_SCMD_CIPHER_MODE_AES_GCM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) CHCR_SCMD_AUTH_MODE_GHASH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) aeadctx->hmac_ctrl, IV >> 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) 0, 0, dst_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) ivptr = (u8 *)(phys_cpl + 1) + dst_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) /* prepare a 16 byte iv */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) /* S A L T | IV | 0x00000001 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) if (get_aead_subtype(tfm) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) memcpy(ivptr, aeadctx->salt, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) put_unaligned_be32(0x01, &ivptr[12]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) ulptx = (struct ulptx_sgl *)(ivptr + 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) chcr_add_aead_dst_ent(req, phys_cpl, qid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) chcr_add_aead_src_ent(req, ulptx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) atomic_inc(&adap->chcr_stats.aead_rqst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) transhdr_len, temp, reqctx->verify);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) reqctx->skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) chcr_aead_common_exit(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) return ERR_PTR(error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) static int chcr_aead_cra_init(struct crypto_aead *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) struct aead_alg *alg = crypto_aead_alg(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258)
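	/* Allocate a software fallback used when the hardware cannot
	 * handle a request (e.g. oversized AAD or a detached device).
	 */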
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) CRYPTO_ALG_NEED_FALLBACK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) CRYPTO_ALG_ASYNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) if (IS_ERR(aeadctx->sw_cipher))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) return PTR_ERR(aeadctx->sw_cipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) sizeof(struct aead_request) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) crypto_aead_reqsize(aeadctx->sw_cipher)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) return chcr_device_init(a_ctx(tfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) static void chcr_aead_cra_exit(struct crypto_aead *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) crypto_free_aead(aeadctx->sw_cipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) unsigned int authsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) aeadctx->mayverify = VERIFY_HW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) unsigned int authsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) u32 maxauth = crypto_aead_maxauthsize(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) /*SHA1 authsize in ipsec is 12 instead of 10 i.e maxauthsize / 2 is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) * true for sha1. authsize == 12 condition should be before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) * authsize == (maxauth >> 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) if (authsize == ICV_4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) aeadctx->mayverify = VERIFY_HW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) } else if (authsize == ICV_6) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) aeadctx->mayverify = VERIFY_HW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) } else if (authsize == ICV_10) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) aeadctx->mayverify = VERIFY_HW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) } else if (authsize == ICV_12) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) aeadctx->mayverify = VERIFY_HW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) } else if (authsize == ICV_14) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) aeadctx->mayverify = VERIFY_HW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) } else if (authsize == (maxauth >> 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) aeadctx->mayverify = VERIFY_HW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) } else if (authsize == maxauth) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) aeadctx->mayverify = VERIFY_HW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) aeadctx->mayverify = VERIFY_SW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) switch (authsize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) case ICV_4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) aeadctx->mayverify = VERIFY_HW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) case ICV_8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) aeadctx->mayverify = VERIFY_HW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) case ICV_12:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) aeadctx->mayverify = VERIFY_HW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) case ICV_14:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) aeadctx->mayverify = VERIFY_HW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) case ICV_16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) aeadctx->mayverify = VERIFY_HW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) case ICV_13:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) case ICV_15:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) aeadctx->mayverify = VERIFY_SW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) unsigned int authsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) switch (authsize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) case ICV_8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) aeadctx->mayverify = VERIFY_HW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) case ICV_12:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) aeadctx->mayverify = VERIFY_HW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) case ICV_16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) aeadctx->mayverify = VERIFY_HW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) unsigned int authsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) switch (authsize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) case ICV_4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) aeadctx->mayverify = VERIFY_HW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) case ICV_6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) aeadctx->mayverify = VERIFY_HW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) case ICV_8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) aeadctx->mayverify = VERIFY_HW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) case ICV_10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) aeadctx->mayverify = VERIFY_HW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) case ICV_12:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) aeadctx->mayverify = VERIFY_HW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) case ICV_14:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) aeadctx->mayverify = VERIFY_HW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) case ICV_16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) aeadctx->mayverify = VERIFY_HW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) static int chcr_ccm_common_setkey(struct crypto_aead *aead,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) const u8 *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) unsigned int keylen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) unsigned char ck_size, mk_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) int key_ctx_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) if (keylen == AES_KEYSIZE_128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) } else if (keylen == AES_KEYSIZE_192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) } else if (keylen == AES_KEYSIZE_256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) aeadctx->enckey_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) key_ctx_size >> 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) memcpy(aeadctx->key, key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) aeadctx->enckey_len = keylen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) const u8 *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) unsigned int keylen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) CRYPTO_TFM_REQ_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) return chcr_ccm_common_setkey(aead, key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) unsigned int keylen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) if (keylen < 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) aeadctx->enckey_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) CRYPTO_TFM_REQ_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) keylen -= 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) memcpy(aeadctx->salt, key + keylen, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) return chcr_ccm_common_setkey(aead, key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) unsigned int keylen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) unsigned int ck_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) int ret = 0, key_ctx_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) struct crypto_aes_ctx aes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) aeadctx->enckey_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) & CRYPTO_TFM_REQ_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) keylen > 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) keylen -= 4; /* nonce/salt is present in the last 4 bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) memcpy(aeadctx->salt, key + keylen, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) if (keylen == AES_KEYSIZE_128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) } else if (keylen == AES_KEYSIZE_192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) } else if (keylen == AES_KEYSIZE_256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) pr_err("GCM: Invalid key length %d\n", keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) memcpy(aeadctx->key, key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) aeadctx->enckey_len = keylen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) AEAD_H_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) CHCR_KEYCTX_MAC_KEY_SIZE_128,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) key_ctx_size >> 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) /* Calculate the H = CIPH(K, 0 repeated 16 times).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) * It will go in key context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) ret = aes_expandkey(&aes, key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) aeadctx->enckey_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) memset(gctx->ghash_h, 0, AEAD_H_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) aes_encrypt(&aes, gctx->ghash_h, gctx->ghash_h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) memzero_explicit(&aes, sizeof(aes));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) unsigned int keylen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) /* it contains auth and cipher key both*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) struct crypto_authenc_keys keys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) unsigned int bs, subtype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) int err = 0, i, key_ctx_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) unsigned char ck_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) struct algo_param param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) int align;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) u8 *o_ptr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) & CRYPTO_TFM_REQ_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) if (get_alg_config(¶m, max_authsize)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) pr_err("Unsupported digest size\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) subtype = get_aead_subtype(authenc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) if (keys.enckeylen == AES_KEYSIZE_128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) } else if (keys.enckeylen == AES_KEYSIZE_192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) } else if (keys.enckeylen == AES_KEYSIZE_256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) pr_err("Unsupported cipher key\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) /* Copy only encryption key. We use authkey to generate h(ipad) and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) * h(opad) so authkey is not needed again. authkeylen size have the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) * size of the hash digest size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) aeadctx->enckey_len = keys.enckeylen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) aeadctx->enckey_len << 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) base_hash = chcr_alloc_shash(max_authsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) if (IS_ERR(base_hash)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) pr_err("Base driver cannot be loaded\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) SHASH_DESC_ON_STACK(shash, base_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) shash->tfm = base_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) bs = crypto_shash_blocksize(base_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) align = KEYCTX_ALIGN_PAD(max_authsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) o_ptr = actx->h_iopad + param.result_size + align;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) if (keys.authkeylen > bs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) err = crypto_shash_digest(shash, keys.authkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) keys.authkeylen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) o_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) pr_err("Base driver cannot be loaded\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) keys.authkeylen = max_authsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) memcpy(o_ptr, keys.authkey, keys.authkeylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637)
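		/* HMAC(K, m) = H((K' ^ opad) || H((K' ^ ipad) || m)).
		 * Precompute the partial digests of the single ipad and
		 * opad blocks so the hardware only has to continue the
		 * inner and outer hashes.
		 */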
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) /* Compute the ipad-digest*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) memcpy(pad, o_ptr, keys.authkeylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) for (i = 0; i < bs >> 2; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) *((unsigned int *)pad + i) ^= IPAD_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) max_authsize))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) /* Compute the opad-digest */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) memcpy(pad, o_ptr, keys.authkeylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) for (i = 0; i < bs >> 2; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) *((unsigned int *)pad + i) ^= OPAD_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) /* convert the ipad and opad digest to network order */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) chcr_change_order(actx->h_iopad, param.result_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) chcr_change_order(o_ptr, param.result_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) key_ctx_len = sizeof(struct _key_ctx) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) roundup(keys.enckeylen, 16) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) (param.result_size + align) * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) 0, 1, key_ctx_len >> 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) actx->auth_mode = param.auth_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) chcr_free_shash(base_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) memzero_explicit(&keys, sizeof(keys));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) aeadctx->enckey_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) memzero_explicit(&keys, sizeof(keys));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) if (!IS_ERR(base_hash))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) chcr_free_shash(base_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) const u8 *key, unsigned int keylen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) struct crypto_authenc_keys keys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) /* it contains auth and cipher key both*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) unsigned int subtype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) int key_ctx_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) unsigned char ck_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) & CRYPTO_TFM_REQ_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) subtype = get_aead_subtype(authenc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) if (keys.enckeylen == AES_KEYSIZE_128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) } else if (keys.enckeylen == AES_KEYSIZE_192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) } else if (keys.enckeylen == AES_KEYSIZE_256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) pr_err("Unsupported cipher key %d\n", keys.enckeylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) aeadctx->enckey_len = keys.enckeylen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) aeadctx->enckey_len << 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) 0, key_ctx_len >> 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) memzero_explicit(&keys, sizeof(keys));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) aeadctx->enckey_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) memzero_explicit(&keys, sizeof(keys));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) static int chcr_aead_op(struct aead_request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) int size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) create_wr_t create_wr_fn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) struct crypto_aead *tfm = crypto_aead_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) struct chcr_context *ctx = a_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) struct uld_ctx *u_ctx = ULD_CTX(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) struct chcr_dev *cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) cdev = a_ctx(tfm)->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) if (!cdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) pr_err("%s : No crypto device.\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) if (chcr_inc_wrcount(cdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) /* Detach state for CHCR means lldi or padap is freed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) * We cannot increment fallback here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) return chcr_aead_fallback(req, reqctx->op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762)
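	/* Back-pressure: if the crypto TX queue is full and the caller
	 * cannot backlog the request, fail with -ENOSPC.
	 */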
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) reqctx->txqidx) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) chcr_dec_wrcount(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) crypto_ipsec_check_assoclen(req->assoclen) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) pr_err("RFC4106: Invalid value of assoclen %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) req->assoclen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) /* Form a WR from req */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx], size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) if (IS_ERR_OR_NULL(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) chcr_dec_wrcount(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) return PTR_ERR_OR_ZERO(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) skb->dev = u_ctx->lldi.ports[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) chcr_send_wr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) return -EINPROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) static int chcr_aead_encrypt(struct aead_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) struct crypto_aead *tfm = crypto_aead_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) struct chcr_context *ctx = a_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) unsigned int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797)
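	/* Derive the TX/RX queue indices from the submitting CPU to
	 * spread requests across the available queues.
	 */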
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) cpu = get_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) reqctx->txqidx = cpu % ctx->ntxq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) reqctx->rxqidx = cpu % ctx->nrxq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) put_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) reqctx->verify = VERIFY_HW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) reqctx->op = CHCR_ENCRYPT_OP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) switch (get_aead_subtype(tfm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) return chcr_aead_op(req, 0, create_authenc_wr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) return chcr_aead_op(req, 0, create_aead_ccm_wr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) return chcr_aead_op(req, 0, create_gcm_wr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) static int chcr_aead_decrypt(struct aead_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) struct crypto_aead *tfm = crypto_aead_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) struct chcr_context *ctx = a_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) int size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) unsigned int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) cpu = get_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) reqctx->txqidx = cpu % ctx->ntxq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) reqctx->rxqidx = cpu % ctx->nrxq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) put_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) if (aeadctx->mayverify == VERIFY_SW) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) size = crypto_aead_maxauthsize(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) reqctx->verify = VERIFY_SW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) reqctx->verify = VERIFY_HW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) reqctx->op = CHCR_DECRYPT_OP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) switch (get_aead_subtype(tfm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) return chcr_aead_op(req, size, create_authenc_wr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) return chcr_aead_op(req, size, create_aead_ccm_wr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) return chcr_aead_op(req, size, create_gcm_wr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) static struct chcr_alg_template driver_algs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) /* AES-CBC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) .is_registered = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) .alg.skcipher = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) .base.cra_name = "cbc(aes)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) .base.cra_driver_name = "cbc-aes-chcr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) .base.cra_blocksize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) .init = chcr_init_tfm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) .exit = chcr_exit_tfm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) .min_keysize = AES_MIN_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) .max_keysize = AES_MAX_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) .ivsize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) .setkey = chcr_aes_cbc_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) .encrypt = chcr_aes_encrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) .decrypt = chcr_aes_decrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) .is_registered = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) .alg.skcipher = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) .base.cra_name = "xts(aes)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) .base.cra_driver_name = "xts-aes-chcr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) .base.cra_blocksize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) .init = chcr_init_tfm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) .exit = chcr_exit_tfm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) .min_keysize = 2 * AES_MIN_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) .max_keysize = 2 * AES_MAX_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) .ivsize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) .setkey = chcr_aes_xts_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) .encrypt = chcr_aes_encrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) .decrypt = chcr_aes_decrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) .is_registered = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) .alg.skcipher = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) .base.cra_name = "ctr(aes)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) .base.cra_driver_name = "ctr-aes-chcr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) .base.cra_blocksize = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) .init = chcr_init_tfm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) .exit = chcr_exit_tfm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) .min_keysize = AES_MIN_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) .max_keysize = AES_MAX_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) .ivsize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) .setkey = chcr_aes_ctr_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) .encrypt = chcr_aes_encrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) .decrypt = chcr_aes_decrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) .type = CRYPTO_ALG_TYPE_SKCIPHER |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) .is_registered = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) .alg.skcipher = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) .base.cra_name = "rfc3686(ctr(aes))",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) .base.cra_driver_name = "rfc3686-ctr-aes-chcr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) .base.cra_blocksize = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) .init = chcr_rfc3686_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) .exit = chcr_exit_tfm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) .ivsize = CTR_RFC3686_IV_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) .setkey = chcr_aes_rfc3686_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) .encrypt = chcr_aes_encrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) .decrypt = chcr_aes_decrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) /* SHA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) .type = CRYPTO_ALG_TYPE_AHASH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) .is_registered = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) .alg.hash = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) .halg.digestsize = SHA1_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) .halg.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) .cra_name = "sha1",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) .cra_driver_name = "sha1-chcr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) .cra_blocksize = SHA1_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) .type = CRYPTO_ALG_TYPE_AHASH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) .is_registered = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) .alg.hash = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) .halg.digestsize = SHA256_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) .halg.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) .cra_name = "sha256",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) .cra_driver_name = "sha256-chcr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) .cra_blocksize = SHA256_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) .type = CRYPTO_ALG_TYPE_AHASH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) .is_registered = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) .alg.hash = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) .halg.digestsize = SHA224_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) .halg.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) .cra_name = "sha224",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) .cra_driver_name = "sha224-chcr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) .cra_blocksize = SHA224_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) .type = CRYPTO_ALG_TYPE_AHASH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) .is_registered = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) .alg.hash = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) .halg.digestsize = SHA384_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) .halg.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) .cra_name = "sha384",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) .cra_driver_name = "sha384-chcr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) .cra_blocksize = SHA384_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) .type = CRYPTO_ALG_TYPE_AHASH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) .is_registered = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) .alg.hash = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) .halg.digestsize = SHA512_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) .halg.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) .cra_name = "sha512",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) .cra_driver_name = "sha512-chcr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) .cra_blocksize = SHA512_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) /* HMAC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) .type = CRYPTO_ALG_TYPE_HMAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) .is_registered = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) .alg.hash = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) .halg.digestsize = SHA1_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) .halg.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) .cra_name = "hmac(sha1)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) .cra_driver_name = "hmac-sha1-chcr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) .cra_blocksize = SHA1_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) .type = CRYPTO_ALG_TYPE_HMAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) .is_registered = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) .alg.hash = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) .halg.digestsize = SHA224_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) .halg.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) .cra_name = "hmac(sha224)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) .cra_driver_name = "hmac-sha224-chcr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) .cra_blocksize = SHA224_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) .type = CRYPTO_ALG_TYPE_HMAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) .is_registered = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) .alg.hash = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) .halg.digestsize = SHA256_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) .halg.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) .cra_name = "hmac(sha256)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) .cra_driver_name = "hmac-sha256-chcr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) .cra_blocksize = SHA256_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) .type = CRYPTO_ALG_TYPE_HMAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) .is_registered = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) .alg.hash = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) .halg.digestsize = SHA384_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) .halg.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) .cra_name = "hmac(sha384)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) .cra_driver_name = "hmac-sha384-chcr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) .cra_blocksize = SHA384_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) .type = CRYPTO_ALG_TYPE_HMAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) .is_registered = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) .alg.hash = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) .halg.digestsize = SHA512_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) .halg.base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) .cra_name = "hmac(sha512)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) .cra_driver_name = "hmac-sha512-chcr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) .cra_blocksize = SHA512_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) },
	/* AEAD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) .is_registered = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) .alg.aead = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) .cra_name = "gcm(aes)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) .cra_driver_name = "gcm-aes-chcr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) .cra_blocksize = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) .cra_priority = CHCR_AEAD_PRIORITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) .cra_ctxsize = sizeof(struct chcr_context) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) sizeof(struct chcr_aead_ctx) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) sizeof(struct chcr_gcm_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) .ivsize = GCM_AES_IV_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) .maxauthsize = GHASH_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) .setkey = chcr_gcm_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) .setauthsize = chcr_gcm_setauthsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) .is_registered = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) .alg.aead = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) .cra_name = "rfc4106(gcm(aes))",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) .cra_driver_name = "rfc4106-gcm-aes-chcr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) .cra_blocksize = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) .cra_priority = CHCR_AEAD_PRIORITY + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) .cra_ctxsize = sizeof(struct chcr_context) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) sizeof(struct chcr_aead_ctx) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) sizeof(struct chcr_gcm_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) .ivsize = GCM_RFC4106_IV_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) .maxauthsize = GHASH_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) .setkey = chcr_gcm_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) .setauthsize = chcr_4106_4309_setauthsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) .is_registered = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) .alg.aead = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) .cra_name = "ccm(aes)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) .cra_driver_name = "ccm-aes-chcr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) .cra_blocksize = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) .cra_priority = CHCR_AEAD_PRIORITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) .cra_ctxsize = sizeof(struct chcr_context) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) sizeof(struct chcr_aead_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) .ivsize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) .maxauthsize = GHASH_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) .setkey = chcr_aead_ccm_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) .setauthsize = chcr_ccm_setauthsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) .is_registered = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) .alg.aead = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) .cra_name = "rfc4309(ccm(aes))",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) .cra_driver_name = "rfc4309-ccm-aes-chcr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) .cra_blocksize = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) .cra_priority = CHCR_AEAD_PRIORITY + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) .cra_ctxsize = sizeof(struct chcr_context) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) sizeof(struct chcr_aead_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) .ivsize = 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) .maxauthsize = GHASH_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) .setkey = chcr_aead_rfc4309_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) .setauthsize = chcr_4106_4309_setauthsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) .is_registered = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) .alg.aead = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) .cra_name = "authenc(hmac(sha1),cbc(aes))",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) .cra_driver_name =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) "authenc-hmac-sha1-cbc-aes-chcr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) .cra_blocksize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) .cra_priority = CHCR_AEAD_PRIORITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) .cra_ctxsize = sizeof(struct chcr_context) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) sizeof(struct chcr_aead_ctx) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) sizeof(struct chcr_authenc_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) .ivsize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) .maxauthsize = SHA1_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) .setkey = chcr_authenc_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) .setauthsize = chcr_authenc_setauthsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) .is_registered = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) .alg.aead = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) .cra_name = "authenc(hmac(sha256),cbc(aes))",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) .cra_driver_name =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) "authenc-hmac-sha256-cbc-aes-chcr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) .cra_blocksize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) .cra_priority = CHCR_AEAD_PRIORITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) .cra_ctxsize = sizeof(struct chcr_context) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) sizeof(struct chcr_aead_ctx) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) sizeof(struct chcr_authenc_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) .ivsize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) .maxauthsize = SHA256_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) .setkey = chcr_authenc_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) .setauthsize = chcr_authenc_setauthsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) .is_registered = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) .alg.aead = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) .cra_name = "authenc(hmac(sha224),cbc(aes))",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) .cra_driver_name =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) "authenc-hmac-sha224-cbc-aes-chcr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) .cra_blocksize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) .cra_priority = CHCR_AEAD_PRIORITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) .cra_ctxsize = sizeof(struct chcr_context) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) sizeof(struct chcr_aead_ctx) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) sizeof(struct chcr_authenc_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) .ivsize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) .maxauthsize = SHA224_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) .setkey = chcr_authenc_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) .setauthsize = chcr_authenc_setauthsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) .is_registered = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) .alg.aead = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) .cra_name = "authenc(hmac(sha384),cbc(aes))",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) .cra_driver_name =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) "authenc-hmac-sha384-cbc-aes-chcr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) .cra_blocksize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) .cra_priority = CHCR_AEAD_PRIORITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) .cra_ctxsize = sizeof(struct chcr_context) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) sizeof(struct chcr_aead_ctx) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) sizeof(struct chcr_authenc_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) .ivsize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) .maxauthsize = SHA384_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) .setkey = chcr_authenc_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) .setauthsize = chcr_authenc_setauthsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) .is_registered = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) .alg.aead = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) .cra_name = "authenc(hmac(sha512),cbc(aes))",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) .cra_driver_name =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) "authenc-hmac-sha512-cbc-aes-chcr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) .cra_blocksize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) .cra_priority = CHCR_AEAD_PRIORITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) .cra_ctxsize = sizeof(struct chcr_context) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) sizeof(struct chcr_aead_ctx) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) sizeof(struct chcr_authenc_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) .ivsize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) .maxauthsize = SHA512_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) .setkey = chcr_authenc_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) .setauthsize = chcr_authenc_setauthsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) .is_registered = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) .alg.aead = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) .cra_name = "authenc(digest_null,cbc(aes))",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) .cra_driver_name =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) "authenc-digest_null-cbc-aes-chcr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) .cra_blocksize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) .cra_priority = CHCR_AEAD_PRIORITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) .cra_ctxsize = sizeof(struct chcr_context) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) sizeof(struct chcr_aead_ctx) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) sizeof(struct chcr_authenc_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) .ivsize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) .maxauthsize = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) .setkey = chcr_aead_digest_null_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) .setauthsize = chcr_authenc_null_setauthsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) .is_registered = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) .alg.aead = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) .cra_driver_name =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) "authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) .cra_blocksize = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) .cra_priority = CHCR_AEAD_PRIORITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) .cra_ctxsize = sizeof(struct chcr_context) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) sizeof(struct chcr_aead_ctx) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) sizeof(struct chcr_authenc_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) .ivsize = CTR_RFC3686_IV_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) .maxauthsize = SHA1_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) .setkey = chcr_authenc_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) .setauthsize = chcr_authenc_setauthsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) .is_registered = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) .alg.aead = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) .cra_driver_name =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286) "authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) .cra_blocksize = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) .cra_priority = CHCR_AEAD_PRIORITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) .cra_ctxsize = sizeof(struct chcr_context) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) sizeof(struct chcr_aead_ctx) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) sizeof(struct chcr_authenc_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) .ivsize = CTR_RFC3686_IV_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) .maxauthsize = SHA256_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) .setkey = chcr_authenc_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) .setauthsize = chcr_authenc_setauthsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) .is_registered = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) .alg.aead = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) .cra_driver_name =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) "authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) .cra_blocksize = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) .cra_priority = CHCR_AEAD_PRIORITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) .cra_ctxsize = sizeof(struct chcr_context) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) sizeof(struct chcr_aead_ctx) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) sizeof(struct chcr_authenc_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) .ivsize = CTR_RFC3686_IV_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) .maxauthsize = SHA224_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) .setkey = chcr_authenc_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317) .setauthsize = chcr_authenc_setauthsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) .is_registered = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) .alg.aead = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) .cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) .cra_driver_name =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) "authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328) .cra_blocksize = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) .cra_priority = CHCR_AEAD_PRIORITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330) .cra_ctxsize = sizeof(struct chcr_context) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) sizeof(struct chcr_aead_ctx) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) sizeof(struct chcr_authenc_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335) .ivsize = CTR_RFC3686_IV_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) .maxauthsize = SHA384_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) .setkey = chcr_authenc_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) .setauthsize = chcr_authenc_setauthsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) .is_registered = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) .alg.aead = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) .cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) .cra_driver_name =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) "authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) .cra_blocksize = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350) .cra_priority = CHCR_AEAD_PRIORITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) .cra_ctxsize = sizeof(struct chcr_context) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) sizeof(struct chcr_aead_ctx) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) sizeof(struct chcr_authenc_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) .ivsize = CTR_RFC3686_IV_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) .maxauthsize = SHA512_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) .setkey = chcr_authenc_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) .setauthsize = chcr_authenc_setauthsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363) .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) .is_registered = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365) .alg.aead = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) .cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) .cra_driver_name =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369) "authenc-digest_null-rfc3686-ctr-aes-chcr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370) .cra_blocksize = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) .cra_priority = CHCR_AEAD_PRIORITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372) .cra_ctxsize = sizeof(struct chcr_context) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) sizeof(struct chcr_aead_ctx) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374) sizeof(struct chcr_authenc_ctx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377) .ivsize = CTR_RFC3686_IV_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378) .maxauthsize = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379) .setkey = chcr_aead_digest_null_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) .setauthsize = chcr_authenc_null_setauthsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384)
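/*
 * Once registered, these algorithms are reached through the normal
 * kernel crypto API by cra_name, with cra_priority breaking ties
 * between providers of the same algorithm. A minimal, illustrative
 * consumer sketch (not part of this driver; key and keylen are
 * assumed to be set up by the caller):
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_skcipher_setkey(tfm, key, keylen);
 *	...
 *	crypto_free_skcipher(tfm);
 */
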
/*
 * chcr_unregister_alg - Deregister crypto algorithms from the
 * kernel framework.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) static int chcr_unregister_alg(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392)
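	/*
	 * Only unregister an algorithm if it was registered and the
	 * framework holds the sole remaining reference (cra_refcnt == 1),
	 * i.e. no transform instances are still live.
	 */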
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393) for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) case CRYPTO_ALG_TYPE_SKCIPHER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) if (driver_algs[i].is_registered && refcount_read(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397) &driver_algs[i].alg.skcipher.base.cra_refcnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399) crypto_unregister_skcipher(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400) &driver_algs[i].alg.skcipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401) driver_algs[i].is_registered = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) case CRYPTO_ALG_TYPE_AEAD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405) if (driver_algs[i].is_registered && refcount_read(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406) &driver_algs[i].alg.aead.base.cra_refcnt) == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) crypto_unregister_aead(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) &driver_algs[i].alg.aead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409) driver_algs[i].is_registered = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) case CRYPTO_ALG_TYPE_AHASH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) if (driver_algs[i].is_registered && refcount_read(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) &driver_algs[i].alg.hash.halg.base.cra_refcnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) crypto_unregister_ahash(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417) &driver_algs[i].alg.hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) driver_algs[i].is_registered = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426) #define SZ_AHASH_CTX sizeof(struct chcr_context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427) #define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428) #define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429)
/*
 * chcr_register_alg - Register crypto algorithms with the kernel framework.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) static int chcr_register_alg(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) {
	struct ahash_alg *a_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) int err = 0, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438) char *name = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439)
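	/* Entries that are already registered are skipped, so this
	 * function may safely be called again, e.g. when another
	 * device comes up.
	 */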
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441) if (driver_algs[i].is_registered)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) case CRYPTO_ALG_TYPE_SKCIPHER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) driver_algs[i].alg.skcipher.base.cra_priority =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) CHCR_CRA_PRIORITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) driver_algs[i].alg.skcipher.base.cra_module = THIS_MODULE;
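			/*
			 * CRYPTO_ALG_NEED_FALLBACK: individual requests may
			 * be handed off to a software implementation, e.g.
			 * for key lengths the hardware cannot handle.
			 */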
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) driver_algs[i].alg.skcipher.base.cra_flags =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450) CRYPTO_ALG_ALLOCATES_MEMORY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) CRYPTO_ALG_NEED_FALLBACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) driver_algs[i].alg.skcipher.base.cra_ctxsize =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453) sizeof(struct chcr_context) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) sizeof(struct ablk_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455) driver_algs[i].alg.skcipher.base.cra_alignmask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457) err = crypto_register_skcipher(&driver_algs[i].alg.skcipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458) name = driver_algs[i].alg.skcipher.base.cra_driver_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460) case CRYPTO_ALG_TYPE_AEAD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461) driver_algs[i].alg.aead.base.cra_flags =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462) CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463) CRYPTO_ALG_ALLOCATES_MEMORY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464) driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466) driver_algs[i].alg.aead.init = chcr_aead_cra_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468) driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469) err = crypto_register_aead(&driver_algs[i].alg.aead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470) name = driver_algs[i].alg.aead.base.cra_driver_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472) case CRYPTO_ALG_TYPE_AHASH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473) a_hash = &driver_algs[i].alg.hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474) a_hash->update = chcr_ahash_update;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) a_hash->final = chcr_ahash_final;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476) a_hash->finup = chcr_ahash_finup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477) a_hash->digest = chcr_ahash_digest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478) a_hash->export = chcr_ahash_export;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479) a_hash->import = chcr_ahash_import;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480) a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481) a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482) a_hash->halg.base.cra_module = THIS_MODULE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483) a_hash->halg.base.cra_flags =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485) a_hash->halg.base.cra_alignmask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) a_hash->halg.base.cra_exit = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487)
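			/* Keyed HMAC transforms need their own init/exit
			 * and a larger context for the keyed state; plain
			 * digests use the stateless variants.
			 */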
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488) if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489) a_hash->halg.base.cra_init = chcr_hmac_cra_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490) a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491) a_hash->init = chcr_hmac_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492) a_hash->setkey = chcr_ahash_setkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493) a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495) a_hash->init = chcr_sha_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496) a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497) a_hash->halg.base.cra_init = chcr_sha_cra_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4499) err = crypto_register_ahash(&driver_algs[i].alg.hash);
			name = driver_algs[i].alg.hash.halg.base.cra_driver_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4502) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4504) if (err) {
			pr_err("%s: algorithm registration failed\n", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4506) goto register_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4507) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4508) driver_algs[i].is_registered = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4509) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4511) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4513) register_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4514) chcr_unregister_alg();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4515) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4517)
/*
 * start_crypto - Register the crypto algorithms.
 * This should be called once, when the first device comes up. After this
 * the kernel will start calling driver APIs for crypto operations.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4523) int start_crypto(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4524) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4525) return chcr_register_alg();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4527)
/*
 * stop_crypto - Deregister all the crypto algorithms with the kernel.
 * This should be called once, when the last device goes down. After this
 * the kernel will not call the driver APIs for crypto operations.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4533) int stop_crypto(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4534) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4535) chcr_unregister_alg();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4536) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4537) }