^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Intel IXP4xx NPE-C crypto driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (C) 2008 Christian Hohnstaedt <chohnstaedt@innominate.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/dmapool.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/crypto.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/rtnetlink.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/gfp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <crypto/ctr.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <crypto/internal/des.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <crypto/aes.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <crypto/hmac.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <crypto/sha.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <crypto/algapi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <crypto/internal/aead.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <crypto/internal/skcipher.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <crypto/authenc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <crypto/scatterwalk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <linux/soc/ixp4xx/npe.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <linux/soc/ixp4xx/qmgr.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32)
#define MAX_KEYLEN 32

/* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
#define NPE_CTX_LEN 80
#define AES_BLOCK128 16

/* Bit flags for crypt_ctl.mode (NPE operation mode) */
#define NPE_OP_HASH_VERIFY 0x01
#define NPE_OP_CCM_ENABLE 0x04
#define NPE_OP_CRYPT_ENABLE 0x08
#define NPE_OP_HASH_ENABLE 0x10
#define NPE_OP_NOT_IN_PLACE 0x20
#define NPE_OP_HMAC_DISABLE 0x40
#define NPE_OP_CRYPT_ENCRYPT 0x80

/* Composite mode values for key/ICV generation operations */
#define NPE_OP_CCM_GEN_MIC 0xcc
#define NPE_OP_HASH_GEN_ICV 0x50
#define NPE_OP_ENC_GEN_KEY 0xc9

/* Cipher chaining mode bits of the NPE cipher config word */
#define MOD_ECB 0x0000
#define MOD_CTR 0x1000
#define MOD_CBC_ENC 0x2000
#define MOD_CBC_DEC 0x3000
#define MOD_CCM_ENC 0x4000
#define MOD_CCM_DEC 0x5000

/* AES key length encoded in 32-bit words */
#define KEYLEN_128 4
#define KEYLEN_192 6
#define KEYLEN_256 8

/* Cipher direction bit */
#define CIPH_DECR 0x0000
#define CIPH_ENCR 0x0400

/* Cipher algorithm selector bits */
#define MOD_DES 0x0000
#define MOD_TDEA2 0x0100
#define MOD_3DES 0x0200
#define MOD_AES 0x0800
#define MOD_AES128 (0x0800 | KEYLEN_128)
#define MOD_AES192 (0x0900 | KEYLEN_192)
#define MOD_AES256 (0x0a00 | KEYLEN_256)

#define MAX_IVLEN 16
#define NPE_ID 2 /* NPE C */
#define NPE_QLEN 16
/* Space for registering when the first
 * NPE_QLEN crypt_ctl are busy */
#define NPE_QLEN_TOTAL 64

/* Queue manager queue IDs used to talk to the NPE */
#define SEND_QID 29
#define RECV_QID 30

/* Host-side state kept in crypt_ctl.ctl_flags */
#define CTL_FLAG_UNUSED 0x0000
#define CTL_FLAG_USED 0x1000
#define CTL_FLAG_PERFORM_ABLK 0x0001
#define CTL_FLAG_GEN_ICV 0x0002
#define CTL_FLAG_GEN_REVAES 0x0004
#define CTL_FLAG_PERFORM_AEAD 0x0008
#define CTL_FLAG_MASK 0x000f

/* HMAC ipad/opad block length (MD5 and SHA1 share a 64-byte block) */
#define HMAC_PAD_BLOCKLEN SHA1_BLOCK_SIZE

#define MD5_DIGEST_SIZE 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94)
/*
 * Scatter-gather element shared with the NPE.  The leading fields
 * (phys_next .. __reserved) are the layout the engine reads over DMA;
 * 'next' and 'dir' are host-only bookkeeping used to walk and unmap
 * the chain afterwards (see free_buf_chain()).
 */
struct buffer_desc {
	u32 phys_next;		/* DMA address of the next descriptor in the chain */
#ifdef __ARMEB__
	u16 buf_len;
	u16 pkt_len;
#else
	/* u16 halves swapped - presumably so the big-endian NPE sees the
	 * same byte layout on a little-endian host; confirm against NPE docs */
	u16 pkt_len;
	u16 buf_len;
#endif
	dma_addr_t phys_addr;	/* DMA address of the data buffer itself */
	u32 __reserved[4];
	struct buffer_desc *next;	/* host-side chain pointer (mirrors phys_next) */
	enum dma_data_direction dir;	/* mapping direction, for dma_unmap_single() */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109)
/*
 * Per-operation control block exchanged with the NPE via the qmgr
 * queues.  Everything up to crypto_ctx is the layout the NPE firmware
 * expects; the fields after the "Used by Host" marker are consumed only
 * by this driver when the completion comes back (see one_packet()).
 * The total size must remain 64 bytes - enforced by the BUILD_BUG_ON
 * in setup_crypt_desc().
 */
struct crypt_ctl {
#ifdef __ARMEB__
	u8 mode;		/* NPE_OP_*  operation mode */
	u8 init_len;
	u16 reserved;
#else
	/* fields reversed - presumably to keep the byte layout identical
	 * for the big-endian NPE; confirm against NPE docs */
	u16 reserved;
	u8 init_len;
	u8 mode;		/* NPE_OP_*  operation mode */
#endif
	u8 iv[MAX_IVLEN];	/* IV for CBC mode or CTR IV for CTR mode */
	dma_addr_t icv_rev_aes;	/* icv or rev aes */
	dma_addr_t src_buf;	/* head of the source buffer_desc chain */
	dma_addr_t dst_buf;	/* head of the destination chain (if any) */
#ifdef __ARMEB__
	u16 auth_offs;		/* Authentication start offset */
	u16 auth_len;		/* Authentication data length */
	u16 crypt_offs;		/* Cryption start offset */
	u16 crypt_len;		/* Cryption data length */
#else
	u16 auth_len;		/* Authentication data length */
	u16 auth_offs;		/* Authentication start offset */
	u16 crypt_len;		/* Cryption data length */
	u16 crypt_offs;		/* Cryption start offset */
#endif
	u32 aadAddr;		/* Additional Auth Data Addr for CCM mode */
	u32 crypto_ctx;		/* NPE Crypto Param structure address */

	/* Used by Host: 4*4 bytes*/
	unsigned ctl_flags;	/* CTL_FLAG_*: in-use marker + request type */
	union {
		struct skcipher_request *ablk_req;	/* CTL_FLAG_PERFORM_ABLK */
		struct aead_request *aead_req;		/* CTL_FLAG_PERFORM_AEAD */
		struct crypto_tfm *tfm;			/* GEN_ICV / GEN_REVAES */
	} data;
	struct buffer_desc *regist_buf;	/* freed on GEN_ICV completion - see one_packet() */
	u8 *regist_ptr;			/* ctx_pool allocation freed alongside regist_buf */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148)
/* Per-request state for skcipher operations (lives in the request ctx). */
struct ablk_ctx {
	struct buffer_desc *src;	/* mapped source chain, freed in one_packet() */
	struct buffer_desc *dst;	/* destination chain; may be NULL (checked in one_packet()) */
	u8 iv[MAX_IVLEN];		/* IV handed back via req->iv after decryption;
					 * filled by the submit path (not in this chunk) */
	bool encrypt;			/* direction; selects how the output IV is derived */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155)
/* Per-request state for AEAD operations (lives in the request ctx). */
struct aead_ctx {
	struct buffer_desc *src;	/* mapped source chain, freed in one_packet() */
	struct buffer_desc *dst;	/* mapped destination chain, freed in one_packet() */
	struct scatterlist ivlist;
	/* used when the hmac is not on one sg entry */
	u8 *hmac_virt;			/* bounce buffer for the ICV; non-NULL triggers
					 * finish_scattered_hmac() on completion */
	int encrypt;			/* direction; decides whether the ICV is copied out */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164)
/* Static description of a hash primitive in the form the NPE expects. */
struct ix_hash_algo {
	u32 cfgword;		/* NPE hash configuration word */
	unsigned char *icv;	/* initial chaining value (algorithm init state) */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169)
/*
 * Per-direction (encrypt or decrypt) security-association state:
 * one NPE context buffer plus bookkeeping for filling it.
 */
struct ix_sa_dir {
	unsigned char *npe_ctx;		/* CPU pointer to the NPE crypto context */
	dma_addr_t npe_ctx_phys;	/* DMA address of the same buffer */
	int npe_ctx_idx;		/* apparently the current fill offset into
					 * npe_ctx - set by code outside this chunk */
	u8 npe_mode;			/* accumulated NPE_OP_* mode bits */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176)
/* Per-tfm context shared by the skcipher and AEAD implementations. */
struct ixp_ctx {
	struct ix_sa_dir encrypt;	/* SA state for the encrypt direction */
	struct ix_sa_dir decrypt;	/* SA state for the decrypt direction */
	int authkey_len;
	u8 authkey[MAX_KEYLEN];		/* HMAC key (AEAD) */
	int enckey_len;
	u8 enckey[MAX_KEYLEN];		/* cipher key */
	u8 salt[MAX_IVLEN];
	u8 nonce[CTR_RFC3686_NONCE_SIZE];	/* rfc3686 CTR nonce */
	unsigned salted;
	/* count of outstanding async NPE config ops; one_packet() decrements
	 * it and fires 'completion' when it reaches zero */
	atomic_t configuring;
	struct completion completion;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190)
/* Driver wrapper pairing a skcipher alg with its NPE config words. */
struct ixp_alg {
	struct skcipher_alg crypto;
	const struct ix_hash_algo *hash;	/* associated hash, if any */
	u32 cfg_enc;	/* NPE cipher config word, encrypt direction */
	u32 cfg_dec;	/* NPE cipher config word, decrypt direction */

	int registered;	/* presumably set once registered with the crypto
			 * core - registration code is not in this chunk */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199)
/* AEAD counterpart of struct ixp_alg. */
struct ixp_aead_alg {
	struct aead_alg crypto;
	const struct ix_hash_algo *hash;	/* hash half of the authenc construction */
	u32 cfg_enc;	/* NPE cipher config word, encrypt direction */
	u32 cfg_dec;	/* NPE cipher config word, decrypt direction */

	int registered;	/* presumably set once registered with the crypto
			 * core - registration code is not in this chunk */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208)
/* MD5: NPE config word + standard initial state (RFC 1321 A/B/C/D). */
static const struct ix_hash_algo hash_alg_md5 = {
	.cfgword = 0xAA010004,
	.icv = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
	"\xFE\xDC\xBA\x98\x76\x54\x32\x10",
};
/* SHA-1: NPE config word + standard initial state (FIPS 180 H0..H4). */
static const struct ix_hash_algo hash_alg_sha1 = {
	.cfgword = 0x00000005,
	.icv = "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
	"\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219)
static struct npe *npe_c;	/* handle to NPE-C, acquired in init_ixp_crypto() */

/* DMA pools: buffer_desc chain nodes and NPE context buffers */
static struct dma_pool *buffer_pool = NULL;
static struct dma_pool *ctx_pool = NULL;

/* Ring of crypt_ctl descriptors shared with the NPE (setup_crypt_desc()) */
static struct crypt_ctl *crypt_virt = NULL;
static dma_addr_t crypt_phys;

static int support_aes = 1;	/* cleared when the firmware lacks AES support */

#define DRIVER_NAME "ixp4xx_crypto"

/* presumably set by the platform probe function (not in this chunk) */
static struct platform_device *pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) return container_of(tfm->__crt_alg, struct ixp_alg,crypto.base)->cfg_enc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) return container_of(tfm->__crt_alg, struct ixp_alg,crypto.base)->cfg_dec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) static int setup_crypt_desc(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) crypt_virt = dma_alloc_coherent(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) NPE_QLEN * sizeof(struct crypt_ctl),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) &crypt_phys, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) if (!crypt_virt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) static spinlock_t desc_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) static struct crypt_ctl *get_crypt_desc(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) static int idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) spin_lock_irqsave(&desc_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) if (unlikely(!crypt_virt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) setup_crypt_desc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) if (unlikely(!crypt_virt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) spin_unlock_irqrestore(&desc_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) i = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) if (++idx >= NPE_QLEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) crypt_virt[i].ctl_flags = CTL_FLAG_USED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) spin_unlock_irqrestore(&desc_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) return crypt_virt +i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) spin_unlock_irqrestore(&desc_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) static spinlock_t emerg_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) static struct crypt_ctl *get_crypt_desc_emerg(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) static int idx = NPE_QLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) struct crypt_ctl *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) desc = get_crypt_desc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) if (desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) return desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) if (unlikely(!crypt_virt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) spin_lock_irqsave(&emerg_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) i = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) if (++idx >= NPE_QLEN_TOTAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) idx = NPE_QLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) crypt_virt[i].ctl_flags = CTL_FLAG_USED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) spin_unlock_irqrestore(&emerg_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) return crypt_virt +i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) spin_unlock_irqrestore(&emerg_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) static void free_buf_chain(struct device *dev, struct buffer_desc *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) dma_addr_t phys)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) while (buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) struct buffer_desc *buf1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) u32 phys1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) buf1 = buf->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) phys1 = buf->phys_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) dma_unmap_single(dev, buf->phys_addr, buf->buf_len, buf->dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) dma_pool_free(buffer_pool, buf, phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) buf = buf1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) phys = phys1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341)
/* Tasklet that drains completions from RECV_QID (crypto_done_action()). */
static struct tasklet_struct crypto_done_tasklet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) static void finish_scattered_hmac(struct crypt_ctl *crypt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) struct aead_request *req = crypt->data.aead_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) struct aead_ctx *req_ctx = aead_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) struct crypto_aead *tfm = crypto_aead_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) int authsize = crypto_aead_authsize(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) int decryptlen = req->assoclen + req->cryptlen - authsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) if (req_ctx->encrypt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) scatterwalk_map_and_copy(req_ctx->hmac_virt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) req->dst, decryptlen, authsize, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358)
/*
 * Completion handler for one descriptor popped from RECV_QID.
 *
 * 'phys' is the DMA address of the crypt_ctl the NPE just finished;
 * the NPE sets bit 0 of the address to flag an authentication failure.
 * Dispatches on the host-side ctl_flags, releases the per-request DMA
 * resources, completes the crypto request (or the setkey-side
 * completion), and returns the descriptor to the ring.
 */
static void one_packet(dma_addr_t phys)
{
	struct device *dev = &pdev->dev;
	struct crypt_ctl *crypt;
	struct ixp_ctx *ctx;
	int failed;

	/* bit 0 = ICV mismatch; mask the low tag bits off the address */
	failed = phys & 0x1 ? -EBADMSG : 0;
	phys &= ~0x3;
	crypt = crypt_phys2virt(phys);

	switch (crypt->ctl_flags & CTL_FLAG_MASK) {
	case CTL_FLAG_PERFORM_AEAD: {
		struct aead_request *req = crypt->data.aead_req;
		struct aead_ctx *req_ctx = aead_request_ctx(req);

		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
		if (req_ctx->hmac_virt) {
			/* ICV went through a bounce buffer; copy out / free it */
			finish_scattered_hmac(crypt);
		}
		req->base.complete(&req->base, failed);
		break;
	}
	case CTL_FLAG_PERFORM_ABLK: {
		struct skcipher_request *req = crypt->data.ablk_req;
		struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
		struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
		unsigned int ivsize = crypto_skcipher_ivsize(tfm);
		unsigned int offset;

		/* hand the chaining IV back to the caller via req->iv */
		if (ivsize > 0) {
			offset = req->cryptlen - ivsize;
			if (req_ctx->encrypt) {
				/* next IV = last ciphertext block of the output */
				scatterwalk_map_and_copy(req->iv, req->dst,
							 offset, ivsize, 0);
			} else {
				/* IV saved by the submit path (not in this chunk) */
				memcpy(req->iv, req_ctx->iv, ivsize);
				memzero_explicit(req_ctx->iv, ivsize);
			}
		}

		if (req_ctx->dst) {
			free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
		}
		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
		req->base.complete(&req->base, failed);
		break;
	}
	case CTL_FLAG_GEN_ICV:
		/* async ICV generation finished: free the registration buffers
		 * and wake anyone waiting in setkey */
		ctx = crypto_tfm_ctx(crypt->data.tfm);
		dma_pool_free(ctx_pool, crypt->regist_ptr,
			      crypt->regist_buf->phys_addr);
		dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
		if (atomic_dec_and_test(&ctx->configuring))
			complete(&ctx->completion);
		break;
	case CTL_FLAG_GEN_REVAES:
		/* reverse AES key generated: clear the encrypt bit in the
		 * decrypt-direction NPE context word */
		ctx = crypto_tfm_ctx(crypt->data.tfm);
		*(u32*)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
		if (atomic_dec_and_test(&ctx->configuring))
			complete(&ctx->completion);
		break;
	default:
		BUG();
	}
	/* descriptor can be handed out again by get_crypt_desc() */
	crypt->ctl_flags = CTL_FLAG_UNUSED;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) static void irqhandler(void *_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) tasklet_schedule(&crypto_done_tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) static void crypto_done_action(unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) for(i=0; i<4; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) dma_addr_t phys = qmgr_get_entry(RECV_QID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) if (!phys)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) one_packet(phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) tasklet_schedule(&crypto_done_tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) static int init_ixp_crypto(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) int ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) u32 msg[2] = { 0, 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) if (! ( ~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) printk(KERN_ERR "ixp_crypto: No HW crypto available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) npe_c = npe_request(NPE_ID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) if (!npe_c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) if (!npe_running(npe_c)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) goto npe_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) goto npe_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) if (npe_send_message(npe_c, msg, "STATUS_MSG"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) goto npe_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) goto npe_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) switch ((msg[1]>>16) & 0xff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) printk(KERN_WARNING "Firmware of %s lacks AES support\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) npe_name(npe_c));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) support_aes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) case 5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) support_aes = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) printk(KERN_ERR "Firmware of %s lacks crypto support\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) npe_name(npe_c));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) goto npe_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) /* buffer_pool will also be used to sometimes store the hmac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) * so assure it is large enough
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) buffer_pool = dma_pool_create("buffer", dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) sizeof(struct buffer_desc), 32, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) if (!buffer_pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) ctx_pool = dma_pool_create("context", dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) NPE_CTX_LEN, 16, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) if (!ctx_pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) "ixp_crypto:out", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) "ixp_crypto:in", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) qmgr_release_queue(SEND_QID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) qmgr_set_irq(RECV_QID, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) qmgr_enable_irq(RECV_QID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) npe_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) printk(KERN_ERR "%s not responding\n", npe_name(npe_c));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) dma_pool_destroy(ctx_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) dma_pool_destroy(buffer_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) npe_release:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) npe_release(npe_c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) static void release_ixp_crypto(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) qmgr_disable_irq(RECV_QID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) tasklet_kill(&crypto_done_tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) qmgr_release_queue(SEND_QID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) qmgr_release_queue(RECV_QID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) dma_pool_destroy(ctx_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) dma_pool_destroy(buffer_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) npe_release(npe_c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) if (crypt_virt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) dma_free_coherent(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) NPE_QLEN * sizeof(struct crypt_ctl),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) crypt_virt, crypt_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) static void reset_sa_dir(struct ix_sa_dir *dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) memset(dir->npe_ctx, 0, NPE_CTX_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) dir->npe_ctx_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) dir->npe_mode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) static int init_sa_dir(struct ix_sa_dir *dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) if (!dir->npe_ctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) reset_sa_dir(dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) static void free_sa_dir(struct ix_sa_dir *dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) memset(dir->npe_ctx, 0, NPE_CTX_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) static int init_tfm(struct crypto_tfm *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) atomic_set(&ctx->configuring, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) ret = init_sa_dir(&ctx->encrypt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) ret = init_sa_dir(&ctx->decrypt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) free_sa_dir(&ctx->encrypt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) static int init_tfm_ablk(struct crypto_skcipher *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) crypto_skcipher_set_reqsize(tfm, sizeof(struct ablk_ctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) return init_tfm(crypto_skcipher_tfm(tfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) static int init_tfm_aead(struct crypto_aead *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) crypto_aead_set_reqsize(tfm, sizeof(struct aead_ctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) return init_tfm(crypto_aead_tfm(tfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) static void exit_tfm(struct crypto_tfm *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) free_sa_dir(&ctx->encrypt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) free_sa_dir(&ctx->decrypt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) static void exit_tfm_ablk(struct crypto_skcipher *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) exit_tfm(crypto_skcipher_tfm(tfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) static void exit_tfm_aead(struct crypto_aead *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) exit_tfm(crypto_aead_tfm(tfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) int init_len, u32 ctx_addr, const u8 *key, int key_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) struct crypt_ctl *crypt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) struct buffer_desc *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) u8 *pad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) dma_addr_t pad_phys, buf_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) if (!pad)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) if (!buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) dma_pool_free(ctx_pool, pad, pad_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) crypt = get_crypt_desc_emerg();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) if (!crypt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) dma_pool_free(ctx_pool, pad, pad_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) dma_pool_free(buffer_pool, buf, buf_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) memcpy(pad, key, key_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) for (i = 0; i < HMAC_PAD_BLOCKLEN; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) pad[i] ^= xpad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) crypt->data.tfm = tfm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) crypt->regist_ptr = pad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) crypt->regist_buf = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) crypt->auth_offs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) crypt->auth_len = HMAC_PAD_BLOCKLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) crypt->crypto_ctx = ctx_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) crypt->src_buf = buf_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) crypt->icv_rev_aes = target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) crypt->mode = NPE_OP_HASH_GEN_ICV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) crypt->init_len = init_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) crypt->ctl_flags |= CTL_FLAG_GEN_ICV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) buf->next = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) buf->buf_len = HMAC_PAD_BLOCKLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) buf->pkt_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) buf->phys_addr = pad_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) atomic_inc(&ctx->configuring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) BUG_ON(qmgr_stat_overflow(SEND_QID));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned authsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) const u8 *key, int key_len, unsigned digest_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) u32 itarget, otarget, npe_ctx_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) unsigned char *cinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) int init_len, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) u32 cfgword;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) struct ix_sa_dir *dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) const struct ix_hash_algo *algo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) cinfo = dir->npe_ctx + dir->npe_ctx_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) algo = ix_hash(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) /* write cfg word to cryptinfo */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) cfgword = algo->cfgword | ( authsize << 6); /* (authsize/4) << 8 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) #ifndef __ARMEB__
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) cfgword ^= 0xAA000000; /* change the "byte swap" flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) *(u32*)cinfo = cpu_to_be32(cfgword);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) cinfo += sizeof(cfgword);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) /* write ICV to cryptinfo */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) memcpy(cinfo, algo->icv, digest_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) cinfo += digest_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) + sizeof(algo->cfgword);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) otarget = itarget + digest_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) dir->npe_ctx_idx += init_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) dir->npe_mode |= NPE_OP_HASH_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) if (!encrypt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) dir->npe_mode |= NPE_OP_HASH_VERIFY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) init_len, npe_ctx_addr, key, key_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) init_len, npe_ctx_addr, key, key_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) static int gen_rev_aes_key(struct crypto_tfm *tfm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) struct crypt_ctl *crypt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) struct ix_sa_dir *dir = &ctx->decrypt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) crypt = get_crypt_desc_emerg();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) if (!crypt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) *(u32*)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) crypt->data.tfm = tfm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) crypt->crypt_offs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) crypt->crypt_len = AES_BLOCK128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) crypt->src_buf = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) crypt->crypto_ctx = dir->npe_ctx_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) crypt->mode = NPE_OP_ENC_GEN_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) crypt->init_len = dir->npe_ctx_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) atomic_inc(&ctx->configuring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) BUG_ON(qmgr_stat_overflow(SEND_QID));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) const u8 *key, int key_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) u8 *cinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) u32 cipher_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) u32 keylen_cfg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) struct ix_sa_dir *dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) cinfo = dir->npe_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) if (encrypt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) cipher_cfg = cipher_cfg_enc(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) cipher_cfg = cipher_cfg_dec(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) if (cipher_cfg & MOD_AES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) switch (key_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) case 16: keylen_cfg = MOD_AES128; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) case 24: keylen_cfg = MOD_AES192; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) case 32: keylen_cfg = MOD_AES256; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) cipher_cfg |= keylen_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) err = crypto_des_verify_key(tfm, key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) /* write cfg word to cryptinfo */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) *(u32*)cinfo = cpu_to_be32(cipher_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) cinfo += sizeof(cipher_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) /* write cipher key to cryptinfo */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) memcpy(cinfo, key, key_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) /* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE -key_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) key_len = DES3_EDE_KEY_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) if ((cipher_cfg & MOD_AES) && !encrypt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) return gen_rev_aes_key(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) static struct buffer_desc *chainup_buffers(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) struct scatterlist *sg, unsigned nbytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) struct buffer_desc *buf, gfp_t flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) enum dma_data_direction dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) for (; nbytes > 0; sg = sg_next(sg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) unsigned len = min(nbytes, sg->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) struct buffer_desc *next_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) dma_addr_t next_buf_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) void *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) nbytes -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) ptr = sg_virt(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) if (!next_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) buf->next = next_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) buf->phys_next = next_buf_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) buf = next_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) buf->phys_addr = sg_dma_address(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) buf->buf_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) buf->dir = dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) buf->next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) buf->phys_next = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) return buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) static int ablk_setkey(struct crypto_skcipher *tfm, const u8 *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) unsigned int key_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) init_completion(&ctx->completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) atomic_inc(&ctx->configuring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) reset_sa_dir(&ctx->encrypt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) reset_sa_dir(&ctx->decrypt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) ret = setup_cipher(&tfm->base, 0, key, key_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) ret = setup_cipher(&tfm->base, 1, key, key_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) if (!atomic_dec_and_test(&ctx->configuring))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) wait_for_completion(&ctx->completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) static int ablk_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) unsigned int key_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) return verify_skcipher_des3_key(tfm, key) ?:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) ablk_setkey(tfm, key, key_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) static int ablk_rfc3686_setkey(struct crypto_skcipher *tfm, const u8 *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) unsigned int key_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) /* the nonce is stored in bytes at end of key */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) if (key_len < CTR_RFC3686_NONCE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) CTR_RFC3686_NONCE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) key_len -= CTR_RFC3686_NONCE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) return ablk_setkey(tfm, key, key_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) static int ablk_perform(struct skcipher_request *req, int encrypt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) unsigned ivsize = crypto_skcipher_ivsize(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) struct ix_sa_dir *dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) struct crypt_ctl *crypt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) unsigned int nbytes = req->cryptlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) struct buffer_desc src_hook;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) unsigned int offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) GFP_KERNEL : GFP_ATOMIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) if (qmgr_stat_full(SEND_QID))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) if (atomic_read(&ctx->configuring))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) req_ctx->encrypt = encrypt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) crypt = get_crypt_desc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) if (!crypt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) crypt->data.ablk_req = req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) crypt->crypto_ctx = dir->npe_ctx_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) crypt->mode = dir->npe_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) crypt->init_len = dir->npe_ctx_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) crypt->crypt_offs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) crypt->crypt_len = nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) BUG_ON(ivsize && !req->iv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) memcpy(crypt->iv, req->iv, ivsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) if (ivsize > 0 && !encrypt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) offset = req->cryptlen - ivsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) scatterwalk_map_and_copy(req_ctx->iv, req->src, offset, ivsize, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) if (req->src != req->dst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) struct buffer_desc dst_hook;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) crypt->mode |= NPE_OP_NOT_IN_PLACE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) /* This was never tested by Intel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) * for more than one dst buffer, I think. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) req_ctx->dst = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) flags, DMA_FROM_DEVICE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) goto free_buf_dest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) src_direction = DMA_TO_DEVICE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) req_ctx->dst = dst_hook.next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) crypt->dst_buf = dst_hook.phys_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) req_ctx->dst = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) req_ctx->src = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) if (!chainup_buffers(dev, req->src, nbytes, &src_hook,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) flags, src_direction))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) goto free_buf_src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) req_ctx->src = src_hook.next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) crypt->src_buf = src_hook.phys_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) BUG_ON(qmgr_stat_overflow(SEND_QID));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) return -EINPROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) free_buf_src:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) free_buf_chain(dev, req_ctx->src, crypt->src_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) free_buf_dest:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) if (req->src != req->dst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) crypt->ctl_flags = CTL_FLAG_UNUSED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) static int ablk_encrypt(struct skcipher_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) return ablk_perform(req, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) static int ablk_decrypt(struct skcipher_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) return ablk_perform(req, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) static int ablk_rfc3686_crypt(struct skcipher_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) u8 iv[CTR_RFC3686_BLOCK_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) u8 *info = req->iv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) /* set up counter block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) /* initialize counter portion of counter block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) *(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) cpu_to_be32(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) req->iv = iv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) ret = ablk_perform(req, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) req->iv = info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) static int aead_perform(struct aead_request *req, int encrypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) int cryptoffset, int eff_cryptlen, u8 *iv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) struct crypto_aead *tfm = crypto_aead_reqtfm(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) unsigned ivsize = crypto_aead_ivsize(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) unsigned authsize = crypto_aead_authsize(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) struct ix_sa_dir *dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) struct crypt_ctl *crypt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) unsigned int cryptlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) struct buffer_desc *buf, src_hook;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) struct aead_ctx *req_ctx = aead_request_ctx(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) GFP_KERNEL : GFP_ATOMIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) unsigned int lastlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) if (qmgr_stat_full(SEND_QID))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) if (atomic_read(&ctx->configuring))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) if (encrypt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) dir = &ctx->encrypt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) cryptlen = req->cryptlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) dir = &ctx->decrypt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) /* req->cryptlen includes the authsize when decrypting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) cryptlen = req->cryptlen -authsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) eff_cryptlen -= authsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) crypt = get_crypt_desc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) if (!crypt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) crypt->data.aead_req = req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) crypt->crypto_ctx = dir->npe_ctx_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) crypt->mode = dir->npe_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) crypt->init_len = dir->npe_ctx_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) crypt->crypt_offs = cryptoffset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) crypt->crypt_len = eff_cryptlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) crypt->auth_offs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) crypt->auth_len = req->assoclen + cryptlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) BUG_ON(ivsize && !req->iv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) memcpy(crypt->iv, req->iv, ivsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) buf = chainup_buffers(dev, req->src, crypt->auth_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) &src_hook, flags, src_direction);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) req_ctx->src = src_hook.next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) crypt->src_buf = src_hook.phys_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) if (!buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) goto free_buf_src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) lastlen = buf->buf_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) if (lastlen >= authsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) crypt->icv_rev_aes = buf->phys_addr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) buf->buf_len - authsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) req_ctx->dst = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) if (req->src != req->dst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) struct buffer_desc dst_hook;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) crypt->mode |= NPE_OP_NOT_IN_PLACE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) src_direction = DMA_TO_DEVICE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) buf = chainup_buffers(dev, req->dst, crypt->auth_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) &dst_hook, flags, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) req_ctx->dst = dst_hook.next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) crypt->dst_buf = dst_hook.phys_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) if (!buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) goto free_buf_dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) if (encrypt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) lastlen = buf->buf_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) if (lastlen >= authsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) crypt->icv_rev_aes = buf->phys_addr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) buf->buf_len - authsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) if (unlikely(lastlen < authsize)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) /* The 12 hmac bytes are scattered,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) * we need to copy them into a safe buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) &crypt->icv_rev_aes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) if (unlikely(!req_ctx->hmac_virt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) goto free_buf_dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) if (!encrypt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) scatterwalk_map_and_copy(req_ctx->hmac_virt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) req->src, cryptlen, authsize, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) req_ctx->encrypt = encrypt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) req_ctx->hmac_virt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) BUG_ON(qmgr_stat_overflow(SEND_QID));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) return -EINPROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) free_buf_dst:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) free_buf_src:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) free_buf_chain(dev, req_ctx->src, crypt->src_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) crypt->ctl_flags = CTL_FLAG_UNUSED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) unsigned digest_len = crypto_aead_maxauthsize(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) if (!ctx->enckey_len && !ctx->authkey_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) init_completion(&ctx->completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) atomic_inc(&ctx->configuring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) reset_sa_dir(&ctx->encrypt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) reset_sa_dir(&ctx->decrypt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) ctx->authkey_len, digest_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) ctx->authkey_len, digest_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) if (!atomic_dec_and_test(&ctx->configuring))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) wait_for_completion(&ctx->completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) int max = crypto_aead_maxauthsize(tfm) >> 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) if ((authsize>>2) < 1 || (authsize>>2) > max || (authsize & 3))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) return aead_setup(tfm, authsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) unsigned int keylen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) struct crypto_authenc_keys keys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) goto badkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) if (keys.authkeylen > sizeof(ctx->authkey))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) goto badkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) if (keys.enckeylen > sizeof(ctx->enckey))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) goto badkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) ctx->authkey_len = keys.authkeylen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) ctx->enckey_len = keys.enckeylen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) memzero_explicit(&keys, sizeof(keys));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) return aead_setup(tfm, crypto_aead_authsize(tfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) badkey:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) memzero_explicit(&keys, sizeof(keys));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) static int des3_aead_setkey(struct crypto_aead *tfm, const u8 *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) unsigned int keylen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) struct crypto_authenc_keys keys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) err = crypto_authenc_extractkeys(&keys, key, keylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) goto badkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) if (keys.authkeylen > sizeof(ctx->authkey))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) goto badkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) err = verify_aead_des3_key(tfm, keys.enckey, keys.enckeylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) goto badkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) ctx->authkey_len = keys.authkeylen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) ctx->enckey_len = keys.enckeylen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) memzero_explicit(&keys, sizeof(keys));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) return aead_setup(tfm, crypto_aead_authsize(tfm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) badkey:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) memzero_explicit(&keys, sizeof(keys));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) static int aead_encrypt(struct aead_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) return aead_perform(req, 1, req->assoclen, req->cryptlen, req->iv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) static int aead_decrypt(struct aead_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) return aead_perform(req, 0, req->assoclen, req->cryptlen, req->iv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) static struct ixp_alg ixp4xx_algos[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) .crypto = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) .base.cra_name = "cbc(des)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) .base.cra_blocksize = DES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) .min_keysize = DES_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) .max_keysize = DES_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) .ivsize = DES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) }, {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) .crypto = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) .base.cra_name = "ecb(des)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) .base.cra_blocksize = DES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) .min_keysize = DES_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) .max_keysize = DES_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) .cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) .cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) }, {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) .crypto = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) .base.cra_name = "cbc(des3_ede)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) .min_keysize = DES3_EDE_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) .max_keysize = DES3_EDE_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) .ivsize = DES3_EDE_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) .setkey = ablk_des3_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) }, {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) .crypto = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) .base.cra_name = "ecb(des3_ede)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) .min_keysize = DES3_EDE_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) .max_keysize = DES3_EDE_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) .setkey = ablk_des3_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) .cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) }, {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) .crypto = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) .base.cra_name = "cbc(aes)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) .base.cra_blocksize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) .min_keysize = AES_MIN_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) .max_keysize = AES_MAX_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) .ivsize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) }, {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) .crypto = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) .base.cra_name = "ecb(aes)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) .base.cra_blocksize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) .min_keysize = AES_MIN_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) .max_keysize = AES_MAX_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) .cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) .cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) }, {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) .crypto = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) .base.cra_name = "ctr(aes)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) .base.cra_blocksize = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) .min_keysize = AES_MIN_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) .max_keysize = AES_MAX_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) .ivsize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) .cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) }, {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) .crypto = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) .base.cra_name = "rfc3686(ctr(aes))",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) .base.cra_blocksize = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) .min_keysize = AES_MIN_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) .max_keysize = AES_MAX_KEY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) .ivsize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) .setkey = ablk_rfc3686_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) .encrypt = ablk_rfc3686_crypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) .decrypt = ablk_rfc3686_crypt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) .cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) } };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) static struct ixp_aead_alg ixp4xx_aeads[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) .crypto = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) .cra_name = "authenc(hmac(md5),cbc(des))",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) .cra_blocksize = DES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) .ivsize = DES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) .maxauthsize = MD5_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) .hash = &hash_alg_md5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) }, {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) .crypto = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) .cra_blocksize = DES3_EDE_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) .ivsize = DES3_EDE_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) .maxauthsize = MD5_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) .setkey = des3_aead_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) .hash = &hash_alg_md5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) }, {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) .crypto = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) .cra_name = "authenc(hmac(sha1),cbc(des))",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) .cra_blocksize = DES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) .ivsize = DES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) .maxauthsize = SHA1_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) .hash = &hash_alg_sha1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) }, {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) .crypto = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) .cra_blocksize = DES3_EDE_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) .ivsize = DES3_EDE_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) .maxauthsize = SHA1_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) .setkey = des3_aead_setkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) .hash = &hash_alg_sha1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) }, {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) .crypto = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) .cra_name = "authenc(hmac(md5),cbc(aes))",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) .cra_blocksize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) .ivsize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) .maxauthsize = MD5_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) .hash = &hash_alg_md5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) }, {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) .crypto = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) .base = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) .cra_name = "authenc(hmac(sha1),cbc(aes))",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) .cra_blocksize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) .ivsize = AES_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) .maxauthsize = SHA1_DIGEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) .hash = &hash_alg_sha1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) } };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) #define IXP_POSTFIX "-ixp4xx"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) static const struct platform_device_info ixp_dev_info __initdata = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) .name = DRIVER_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) .id = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) .dma_mask = DMA_BIT_MASK(32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) static int __init ixp_module_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) int num = ARRAY_SIZE(ixp4xx_algos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) pdev = platform_device_register_full(&ixp_dev_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) if (IS_ERR(pdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) return PTR_ERR(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) spin_lock_init(&desc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) spin_lock_init(&emerg_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) err = init_ixp_crypto(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) platform_device_unregister(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) for (i=0; i< num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) struct skcipher_alg *cra = &ixp4xx_algos[i].crypto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) "%s"IXP_POSTFIX, cra->base.cra_name) >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) CRYPTO_MAX_ALG_NAME)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) /* block ciphers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) CRYPTO_ALG_ALLOCATES_MEMORY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) if (!cra->setkey)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) cra->setkey = ablk_setkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) if (!cra->encrypt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) cra->encrypt = ablk_encrypt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) if (!cra->decrypt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) cra->decrypt = ablk_decrypt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) cra->init = init_tfm_ablk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) cra->exit = exit_tfm_ablk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) cra->base.cra_module = THIS_MODULE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) cra->base.cra_alignmask = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) cra->base.cra_priority = 300;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) if (crypto_register_skcipher(cra))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) printk(KERN_ERR "Failed to register '%s'\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) cra->base.cra_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) ixp4xx_algos[i].registered = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) struct aead_alg *cra = &ixp4xx_aeads[i].crypto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) "%s"IXP_POSTFIX, cra->base.cra_name) >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) CRYPTO_MAX_ALG_NAME)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) /* authenc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) CRYPTO_ALG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) CRYPTO_ALG_ALLOCATES_MEMORY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) cra->setkey = cra->setkey ?: aead_setkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) cra->setauthsize = aead_setauthsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) cra->encrypt = aead_encrypt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) cra->decrypt = aead_decrypt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) cra->init = init_tfm_aead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) cra->exit = exit_tfm_aead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) cra->base.cra_module = THIS_MODULE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) cra->base.cra_alignmask = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) cra->base.cra_priority = 300;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) if (crypto_register_aead(cra))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) printk(KERN_ERR "Failed to register '%s'\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) cra->base.cra_driver_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) ixp4xx_aeads[i].registered = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) static void __exit ixp_module_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) int num = ARRAY_SIZE(ixp4xx_algos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) if (ixp4xx_aeads[i].registered)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) crypto_unregister_aead(&ixp4xx_aeads[i].crypto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) for (i=0; i< num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) if (ixp4xx_algos[i].registered)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) crypto_unregister_skcipher(&ixp4xx_algos[i].crypto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) release_ixp_crypto(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) platform_device_unregister(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) module_init(ixp_module_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) module_exit(ixp_module_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) MODULE_DESCRIPTION("IXP4xx hardware crypto");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)