Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Shared glue code for 128bit block ciphers
 *
 * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 *
 * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
 *   Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 * CTR part based on code (crypto/ctr.c) by:
 *   (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
 */

#include <linux/module.h>
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/xts.h>
#include <asm/crypto/glue_helper.h>

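/*
 * Each helper below walks an skcipher_request and feeds runs of blocks to
 * the architecture-optimized routines described by a common_glue_ctx,
 * enabling the FPU only for requests large enough to amortize it (see
 * glue_fpu_begin()/glue_fpu_end()).
 */
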
int glue_ecb_req_128bit(const struct common_glue_ctx *gctx,
			struct skcipher_request *req)
{
	void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	const unsigned int bsize = 128 / 8;
	struct skcipher_walk walk;
	bool fpu_enabled = false;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

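	/*
	 * funcs[] is walked in order; callers are expected to list the
	 * widest batch first so that leftover blocks fall through to the
	 * single-block entry.
	 */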
	while ((nbytes = walk.nbytes)) {
		const u8 *src = walk.src.virt.addr;
		u8 *dst = walk.dst.virt.addr;
		unsigned int func_bytes;
		unsigned int i;

		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     &walk, fpu_enabled, nbytes);
		for (i = 0; i < gctx->num_funcs; i++) {
			func_bytes = bsize * gctx->funcs[i].num_blocks;

			if (nbytes < func_bytes)
				continue;

			/* Process multi-block batch */
			do {
				gctx->funcs[i].fn_u.ecb(ctx, dst, src);
				src += func_bytes;
				dst += func_bytes;
				nbytes -= func_bytes;
			} while (nbytes >= func_bytes);

			if (nbytes < bsize)
				break;
		}
		err = skcipher_walk_done(&walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);
	return err;
}
EXPORT_SYMBOL_GPL(glue_ecb_req_128bit);

int glue_cbc_encrypt_req_128bit(const common_glue_func_t fn,
				struct skcipher_request *req)
{
	void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	const unsigned int bsize = 128 / 8;
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes)) {
		const u128 *src = (u128 *)walk.src.virt.addr;
		u128 *dst = (u128 *)walk.dst.virt.addr;
		u128 *iv = (u128 *)walk.iv;

		do {
			u128_xor(dst, src, iv);
			fn(ctx, (u8 *)dst, (u8 *)dst);
			iv = dst;
			src++;
			dst++;
			nbytes -= bsize;
		} while (nbytes >= bsize);

		*(u128 *)walk.iv = *iv;
		err = skcipher_walk_done(&walk, nbytes);
	}
	return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_encrypt_req_128bit);

int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx,
				struct skcipher_request *req)
{
	void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	const unsigned int bsize = 128 / 8;
	struct skcipher_walk walk;
	bool fpu_enabled = false;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes)) {
		const u128 *src = walk.src.virt.addr;
		u128 *dst = walk.dst.virt.addr;
		unsigned int func_bytes, num_blocks;
		unsigned int i;
		u128 last_iv;

		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     &walk, fpu_enabled, nbytes);
		/* Start of the last block. */
		src += nbytes / bsize - 1;
		dst += nbytes / bsize - 1;

		last_iv = *src;

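		/*
		 * Work backwards from the last block: each batch is decrypted
		 * in place, then its first block is XORed with the ciphertext
		 * block that precedes it (the CBC chaining value).
		 */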
		for (i = 0; i < gctx->num_funcs; i++) {
			num_blocks = gctx->funcs[i].num_blocks;
			func_bytes = bsize * num_blocks;

			if (nbytes < func_bytes)
				continue;

			/* Process multi-block batch */
			do {
				src -= num_blocks - 1;
				dst -= num_blocks - 1;

				gctx->funcs[i].fn_u.cbc(ctx, (u8 *)dst,
							(const u8 *)src);

				nbytes -= func_bytes;
				if (nbytes < bsize)
					goto done;

				u128_xor(dst, dst, --src);
				dst--;
			} while (nbytes >= func_bytes);
		}
done:
		u128_xor(dst, dst, (u128 *)walk.iv);
		*(u128 *)walk.iv = last_iv;
		err = skcipher_walk_done(&walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);
	return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_decrypt_req_128bit);

int glue_ctr_req_128bit(const struct common_glue_ctx *gctx,
			struct skcipher_request *req)
{
	void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	const unsigned int bsize = 128 / 8;
	struct skcipher_walk walk;
	bool fpu_enabled = false;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) >= bsize) {
		const u128 *src = walk.src.virt.addr;
		u128 *dst = walk.dst.virt.addr;
		unsigned int func_bytes, num_blocks;
		unsigned int i;
		le128 ctrblk;

		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     &walk, fpu_enabled, nbytes);

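		/*
		 * walk.iv holds the counter block in big-endian form; the
		 * fn_u.ctr routines take a little-endian counter, so convert
		 * on entry and write the updated counter back afterwards.
		 */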
		be128_to_le128(&ctrblk, (be128 *)walk.iv);

		for (i = 0; i < gctx->num_funcs; i++) {
			num_blocks = gctx->funcs[i].num_blocks;
			func_bytes = bsize * num_blocks;

			if (nbytes < func_bytes)
				continue;

			/* Process multi-block batch */
			do {
				gctx->funcs[i].fn_u.ctr(ctx, (u8 *)dst,
							(const u8 *)src,
							&ctrblk);
				src += num_blocks;
				dst += num_blocks;
				nbytes -= func_bytes;
			} while (nbytes >= func_bytes);

			if (nbytes < bsize)
				break;
		}

		le128_to_be128((be128 *)walk.iv, &ctrblk);
		err = skcipher_walk_done(&walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);

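	/*
	 * CTR needs no padding: any final partial block is handled through a
	 * bounce buffer with the single-block (last) entry in funcs[].
	 */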
	if (nbytes) {
		le128 ctrblk;
		u128 tmp;

		be128_to_le128(&ctrblk, (be128 *)walk.iv);
		memcpy(&tmp, walk.src.virt.addr, nbytes);
		gctx->funcs[gctx->num_funcs - 1].fn_u.ctr(ctx, (u8 *)&tmp,
							  (const u8 *)&tmp,
							  &ctrblk);
		memcpy(walk.dst.virt.addr, &tmp, nbytes);
		le128_to_be128((be128 *)walk.iv, &ctrblk);

		err = skcipher_walk_done(&walk, 0);
	}

	return err;
}
EXPORT_SYMBOL_GPL(glue_ctr_req_128bit);

static unsigned int __glue_xts_req_128bit(const struct common_glue_ctx *gctx,
					  void *ctx,
					  struct skcipher_walk *walk)
{
	const unsigned int bsize = 128 / 8;
	unsigned int nbytes = walk->nbytes;
	u128 *src = walk->src.virt.addr;
	u128 *dst = walk->dst.virt.addr;
	unsigned int num_blocks, func_bytes;
	unsigned int i;

	/* Process multi-block batch */
	for (i = 0; i < gctx->num_funcs; i++) {
		num_blocks = gctx->funcs[i].num_blocks;
		func_bytes = bsize * num_blocks;

		if (nbytes >= func_bytes) {
			do {
				gctx->funcs[i].fn_u.xts(ctx, (u8 *)dst,
							(const u8 *)src,
							walk->iv);

				src += num_blocks;
				dst += num_blocks;
				nbytes -= func_bytes;
			} while (nbytes >= func_bytes);

			if (nbytes < bsize)
				goto done;
		}
	}

done:
	return nbytes;
}

int glue_xts_req_128bit(const struct common_glue_ctx *gctx,
			struct skcipher_request *req,
			common_glue_func_t tweak_fn, void *tweak_ctx,
			void *crypt_ctx, bool decrypt)
{
	const bool cts = (req->cryptlen % XTS_BLOCK_SIZE);
	const unsigned int bsize = 128 / 8;
	struct skcipher_request subreq;
	struct skcipher_walk walk;
	bool fpu_enabled = false;
	unsigned int nbytes, tail;
	int err;

	if (req->cryptlen < XTS_BLOCK_SIZE)
		return -EINVAL;

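	/*
	 * Ciphertext stealing: when cryptlen is not a multiple of the block
	 * size, trim the request to full blocks here and handle the last
	 * full block plus the partial tail separately below.
	 */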
	if (unlikely(cts)) {
		struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);

		tail = req->cryptlen % XTS_BLOCK_SIZE + XTS_BLOCK_SIZE;

		skcipher_request_set_tfm(&subreq, tfm);
		skcipher_request_set_callback(&subreq,
					      crypto_skcipher_get_flags(tfm),
					      NULL, NULL);
		skcipher_request_set_crypt(&subreq, req->src, req->dst,
					   req->cryptlen - tail, req->iv);
		req = &subreq;
	}

	err = skcipher_walk_virt(&walk, req, false);
	nbytes = walk.nbytes;
	if (err)
		return err;

	/* set minimum length to bsize, for tweak_fn */
	fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
				     &walk, fpu_enabled,
				     nbytes < bsize ? bsize : nbytes);

	/* calculate first value of T */
	tweak_fn(tweak_ctx, walk.iv, walk.iv);

	while (nbytes) {
		nbytes = __glue_xts_req_128bit(gctx, crypt_ctx, &walk);

		err = skcipher_walk_done(&walk, nbytes);
		nbytes = walk.nbytes;
	}

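	/*
	 * CTS tail: run one more full block through the cipher, splice the
	 * stolen bytes with the partial block, then process the reassembled
	 * block with the remaining tweak (the tweak order is swapped for
	 * decryption).
	 */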
	if (unlikely(cts)) {
		u8 *next_tweak, *final_tweak = req->iv;
		struct scatterlist *src, *dst;
		struct scatterlist s[2], d[2];
		le128 b[2];

		dst = src = scatterwalk_ffwd(s, req->src, req->cryptlen);
		if (req->dst != req->src)
			dst = scatterwalk_ffwd(d, req->dst, req->cryptlen);

		if (decrypt) {
			next_tweak = memcpy(b, req->iv, XTS_BLOCK_SIZE);
			gf128mul_x_ble(b, b);
		} else {
			next_tweak = req->iv;
		}

		skcipher_request_set_crypt(&subreq, src, dst, XTS_BLOCK_SIZE,
					   next_tweak);

		err = skcipher_walk_virt(&walk, req, false) ?:
		      skcipher_walk_done(&walk,
				__glue_xts_req_128bit(gctx, crypt_ctx, &walk));
		if (err)
			goto out;

		scatterwalk_map_and_copy(b, dst, 0, XTS_BLOCK_SIZE, 0);
		memcpy(b + 1, b, tail - XTS_BLOCK_SIZE);
		scatterwalk_map_and_copy(b, src, XTS_BLOCK_SIZE,
					 tail - XTS_BLOCK_SIZE, 0);
		scatterwalk_map_and_copy(b, dst, 0, tail, 1);

		skcipher_request_set_crypt(&subreq, dst, dst, XTS_BLOCK_SIZE,
					   final_tweak);

		err = skcipher_walk_virt(&walk, req, false) ?:
		      skcipher_walk_done(&walk,
				__glue_xts_req_128bit(gctx, crypt_ctx, &walk));
	}

out:
	glue_fpu_end(fpu_enabled);

	return err;
}
EXPORT_SYMBOL_GPL(glue_xts_req_128bit);

void glue_xts_crypt_128bit_one(const void *ctx, u8 *dst, const u8 *src,
			       le128 *iv, common_glue_func_t fn)
{
	le128 ivblk = *iv;

	/* generate next IV */
	gf128mul_x_ble(iv, &ivblk);

	/* CC <- T xor C */
	u128_xor((u128 *)dst, (const u128 *)src, (u128 *)&ivblk);

	/* PP <- D(Key2,CC) */
	fn(ctx, dst, dst);

	/* P <- T xor PP */
	u128_xor((u128 *)dst, (u128 *)dst, (u128 *)&ivblk);
}
EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit_one);

MODULE_LICENSE("GPL");
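
For context, the sketch below shows how these helpers are typically consumed by an x86 cipher driver: a common_glue_ctx lists batch routines widest-first and the skcipher entry point simply forwards the request. This is a minimal illustration, not code from this repository; the mycipher_* names, the parallel-block count, and the one-block prototypes are hypothetical, and only the common_glue_ctx fields that glue_ecb_req_128bit() itself dereferences above are relied on.

#include <crypto/internal/skcipher.h>
#include <asm/crypto/glue_helper.h>

#define MYCIPHER_PARALLEL_BLOCKS 8	/* hypothetical SIMD batch width */

/* Hypothetical primitives, prototyped to match how fn_u.ecb is invoked above. */
asmlinkage void mycipher_ecb_enc_8way(const void *ctx, u8 *dst, const u8 *src);
void mycipher_encrypt_one(const void *ctx, u8 *dst, const u8 *src);

static const struct common_glue_ctx mycipher_enc = {
	.num_funcs = 2,
	.fpu_blocks_limit = MYCIPHER_PARALLEL_BLOCKS,

	.funcs = { {
		/* widest batch first, as the dispatch loops above expect */
		.num_blocks = MYCIPHER_PARALLEL_BLOCKS,
		.fn_u = { .ecb = mycipher_ecb_enc_8way }
	}, {
		/* single-block fallback for the tail */
		.num_blocks = 1,
		.fn_u = { .ecb = mycipher_encrypt_one }
	} }
};

static int mycipher_ecb_encrypt(struct skcipher_request *req)
{
	return glue_ecb_req_128bit(&mycipher_enc, req);
}

The CBC, CTR and XTS helpers are wired up the same way, with .fn_u.cbc, .fn_u.ctr and .fn_u.xts entries in place of .ecb.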