From 20342e3f64fb8eea86e490f57a3c0a4ace4284c1 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Tue, 31 Oct 2023 20:17:24 -0700 Subject: crypto: x86/sha1 - autoload if SHA-NI detected The x86 SHA-1 module contains four implementations: SSSE3, AVX, AVX2, and SHA-NI. Commit 1c43c0f1f84a ("crypto: x86/sha - load modules based on CPU features") made the module be autoloaded when SSSE3, AVX, or AVX2 is detected. The omission of SHA-NI appears to be an oversight, perhaps because of the outdated file-level comment. This patch fixes this, though in practice this makes no difference because SSSE3 is a subset of the other three features anyway. Indeed, sha1_ni_transform() executes SSSE3 instructions such as pshufb. Reviewed-by: Roxana Nicolescu Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- arch/x86/crypto/sha1_ssse3_glue.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c index 959afa705e95..ab8bc54f254d 100644 --- a/arch/x86/crypto/sha1_ssse3_glue.c +++ b/arch/x86/crypto/sha1_ssse3_glue.c @@ -2,8 +2,8 @@ /* * Cryptographic API. * - * Glue code for the SHA1 Secure Hash Algorithm assembler implementation using - * Supplemental SSE3 instructions. + * Glue code for the SHA1 Secure Hash Algorithm assembler implementations + * using SSSE3, AVX, AVX2, and SHA-NI instructions. * * This file is based on sha1_generic.c * @@ -28,6 +28,9 @@ #include static const struct x86_cpu_id module_cpu_ids[] = { +#ifdef CONFIG_AS_SHA1_NI + X86_MATCH_FEATURE(X86_FEATURE_SHA_NI, NULL), +#endif X86_MATCH_FEATURE(X86_FEATURE_AVX2, NULL), X86_MATCH_FEATURE(X86_FEATURE_AVX, NULL), X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL), -- cgit v1.2.3 From ba5a434d5a1e799defd964537bdd406318ef5f7d Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Tue, 31 Oct 2023 20:18:11 -0700 Subject: crypto: x86/sha256 - autoload if SHA-NI detected The x86 SHA-256 module contains four implementations: SSSE3, AVX, AVX2, and SHA-NI. Commit 1c43c0f1f84a ("crypto: x86/sha - load modules based on CPU features") made the module be autoloaded when SSSE3, AVX, or AVX2 is detected. The omission of SHA-NI appears to be an oversight, perhaps because of the outdated file-level comment. This patch fixes this, though in practice this makes no difference because SSSE3 is a subset of the other three features anyway. Indeed, sha256_ni_transform() executes SSSE3 instructions such as pshufb. Reviewed-by: Roxana Nicolescu Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- arch/x86/crypto/sha256_ssse3_glue.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c index 4c0383a90e11..e04a43d9f7d5 100644 --- a/arch/x86/crypto/sha256_ssse3_glue.c +++ b/arch/x86/crypto/sha256_ssse3_glue.c @@ -1,8 +1,8 @@ /* * Cryptographic API. * - * Glue code for the SHA256 Secure Hash Algorithm assembler - * implementation using supplemental SSE3 / AVX / AVX2 instructions. + * Glue code for the SHA256 Secure Hash Algorithm assembler implementations + * using SSSE3, AVX, AVX2, and SHA-NI instructions. 
* * This file is based on sha256_generic.c * @@ -45,6 +45,9 @@ asmlinkage void sha256_transform_ssse3(struct sha256_state *state, const u8 *data, int blocks); static const struct x86_cpu_id module_cpu_ids[] = { +#ifdef CONFIG_AS_SHA256_NI + X86_MATCH_FEATURE(X86_FEATURE_SHA_NI, NULL), +#endif X86_MATCH_FEATURE(X86_FEATURE_AVX2, NULL), X86_MATCH_FEATURE(X86_FEATURE_AVX, NULL), X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL), -- cgit v1.2.3 From aaa03fdb56c781db4a4831dd5d6ec8817918c726 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 21 Nov 2023 12:52:44 -0600 Subject: crypto: p10-aes-gcm - Avoid -Wstringop-overflow warnings The compiler doesn't know that `32` is an offset into the Hash table: 56 struct Hash_ctx { 57 u8 H[16]; /* subkey */ 58 u8 Htable[256]; /* Xi, Hash table(offset 32) */ 59 }; So, it legitimately complains about a potential out-of-bounds issue if `256 bytes` are accessed in `htable` (this implies going `32 bytes` beyond the boundaries of `Htable`): arch/powerpc/crypto/aes-gcm-p10-glue.c: In function 'gcmp10_init': arch/powerpc/crypto/aes-gcm-p10-glue.c:120:9: error: 'gcm_init_htable' accessing 256 bytes in a region of size 224 [-Werror=stringop-overflow=] 120 | gcm_init_htable(hash->Htable+32, hash->H); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ arch/powerpc/crypto/aes-gcm-p10-glue.c:120:9: note: referencing argument 1 of type 'unsigned char[256]' arch/powerpc/crypto/aes-gcm-p10-glue.c:120:9: note: referencing argument 2 of type 'unsigned char[16]' arch/powerpc/crypto/aes-gcm-p10-glue.c:40:17: note: in a call to function 'gcm_init_htable' 40 | asmlinkage void gcm_init_htable(unsigned char htable[256], unsigned char Xi[16]); | ^~~~~~~~~~~~~~~ Address this by not specifying the size of `htable` in the function prototype; for consistency, do the same for parameter `Xi`. Reported-by: Stephen Rothwell Closes: https://lore.kernel.org/linux-next/20231121131903.68a37932@canb.auug.org.au/ Signed-off-by: Gustavo A. R. Silva Signed-off-by: Herbert Xu --- arch/powerpc/crypto/aes-gcm-p10-glue.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/crypto/aes-gcm-p10-glue.c b/arch/powerpc/crypto/aes-gcm-p10-glue.c index 4b6e899895e7..f62ee54076c0 100644 --- a/arch/powerpc/crypto/aes-gcm-p10-glue.c +++ b/arch/powerpc/crypto/aes-gcm-p10-glue.c @@ -37,7 +37,7 @@ asmlinkage void aes_p10_gcm_encrypt(u8 *in, u8 *out, size_t len, void *rkey, u8 *iv, void *Xi); asmlinkage void aes_p10_gcm_decrypt(u8 *in, u8 *out, size_t len, void *rkey, u8 *iv, void *Xi); -asmlinkage void gcm_init_htable(unsigned char htable[256], unsigned char Xi[16]); +asmlinkage void gcm_init_htable(unsigned char htable[], unsigned char Xi[]); asmlinkage void gcm_ghash_p10(unsigned char *Xi, unsigned char *Htable, unsigned char *aad, unsigned int alen); -- cgit v1.2.3 From d07f951903fa9922c375b8ab1ce81b18a0034e3b Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 28 Nov 2023 14:22:13 +0800 Subject: crypto: s390/aes - Fix buffer overread in CTR mode When processing the last block, the s390 ctr code will always read a whole block, even if there isn't a whole block of data left. Fix this by using the actual length left and copying it into a buffer first for processing.
Fixes: 0200f3ecc196 ("crypto: s390 - add System z hardware support for CTR mode") Cc: Reported-by: Guangwu Zhang Signed-off-by: Herbert Xu Reviewed-by: Harald Freudenberger Signed-off-by: Herbert Xu --- arch/s390/crypto/aes_s390.c | 4 +++- arch/s390/crypto/paes_s390.c | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c index c773820e4af9..c6fe5405de4a 100644 --- a/arch/s390/crypto/aes_s390.c +++ b/arch/s390/crypto/aes_s390.c @@ -597,7 +597,9 @@ static int ctr_aes_crypt(struct skcipher_request *req) * final block may be < AES_BLOCK_SIZE, copy only nbytes */ if (nbytes) { - cpacf_kmctr(sctx->fc, sctx->key, buf, walk.src.virt.addr, + memset(buf, 0, AES_BLOCK_SIZE); + memcpy(buf, walk.src.virt.addr, nbytes); + cpacf_kmctr(sctx->fc, sctx->key, buf, buf, AES_BLOCK_SIZE, walk.iv); memcpy(walk.dst.virt.addr, buf, nbytes); crypto_inc(walk.iv, AES_BLOCK_SIZE); diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c index 8b541e44151d..55ee5567a5ea 100644 --- a/arch/s390/crypto/paes_s390.c +++ b/arch/s390/crypto/paes_s390.c @@ -693,9 +693,11 @@ static int ctr_paes_crypt(struct skcipher_request *req) * final block may be < AES_BLOCK_SIZE, copy only nbytes */ if (nbytes) { + memset(buf, 0, AES_BLOCK_SIZE); + memcpy(buf, walk.src.virt.addr, nbytes); while (1) { if (cpacf_kmctr(ctx->fc, &param, buf, - walk.src.virt.addr, AES_BLOCK_SIZE, + buf, AES_BLOCK_SIZE, walk.iv) == AES_BLOCK_SIZE) break; if (__paes_convert_key(ctx)) -- cgit v1.2.3 From 29fa12e918e5f7fe9947aab2f325a4b54f4c5d99 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 16 Sep 2023 17:13:23 +0800 Subject: crypto: arm64/sm4 - Remove cfb(sm4) Remove the unused CFB implementation. Signed-off-by: Herbert Xu --- arch/arm64/crypto/Kconfig | 6 +- arch/arm64/crypto/sm4-ce-core.S | 158 -------------------------------- arch/arm64/crypto/sm4-ce-glue.c | 108 +------------------------- arch/arm64/crypto/sm4-ce.h | 3 - arch/arm64/crypto/sm4-neon-core.S | 113 --------------------------- arch/arm64/crypto/sm4-neon-glue.c | 105 +------------------------ 6 files changed, 4 insertions(+), 489 deletions(-) (limited to 'arch') diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig index 6d06b448a66e..eb7b423ba463 100644 --- a/arch/arm64/crypto/Kconfig +++ b/arch/arm64/crypto/Kconfig @@ -231,7 +231,7 @@ config CRYPTO_SM4_ARM64_CE - NEON (Advanced SIMD) extensions config CRYPTO_SM4_ARM64_CE_BLK - tristate "Ciphers: SM4, modes: ECB/CBC/CFB/CTR/XTS (ARMv8 Crypto Extensions)" + tristate "Ciphers: SM4, modes: ECB/CBC/CTR/XTS (ARMv8 Crypto Extensions)" depends on KERNEL_MODE_NEON select CRYPTO_SKCIPHER select CRYPTO_SM4 @@ -240,7 +240,6 @@ config CRYPTO_SM4_ARM64_CE_BLK with block cipher modes: - ECB (Electronic Codebook) mode (NIST SP800-38A) - CBC (Cipher Block Chaining) mode (NIST SP800-38A) - - CFB (Cipher Feedback) mode (NIST SP800-38A) - CTR (Counter) mode (NIST SP800-38A) - XTS (XOR Encrypt XOR with ciphertext stealing) mode (NIST SP800-38E and IEEE 1619) @@ -250,7 +249,7 @@ config CRYPTO_SM4_ARM64_CE_BLK - NEON (Advanced SIMD) extensions config CRYPTO_SM4_ARM64_NEON_BLK - tristate "Ciphers: SM4, modes: ECB/CBC/CFB/CTR (NEON)" + tristate "Ciphers: SM4, modes: ECB/CBC/CTR (NEON)" depends on KERNEL_MODE_NEON select CRYPTO_SKCIPHER select CRYPTO_SM4 @@ -259,7 +258,6 @@ config CRYPTO_SM4_ARM64_NEON_BLK with block cipher modes: - ECB (Electronic Codebook) mode (NIST SP800-38A) - CBC (Cipher Block Chaining) mode (NIST SP800-38A) - - CFB
(Cipher Feedback) mode (NIST SP800-38A) - CTR (Counter) mode (NIST SP800-38A) Architecture: arm64 using: diff --git a/arch/arm64/crypto/sm4-ce-core.S b/arch/arm64/crypto/sm4-ce-core.S index 877b80c54a0d..1f3625c2c67e 100644 --- a/arch/arm64/crypto/sm4-ce-core.S +++ b/arch/arm64/crypto/sm4-ce-core.S @@ -402,164 +402,6 @@ SYM_FUNC_START(sm4_ce_cbc_cts_dec) ret SYM_FUNC_END(sm4_ce_cbc_cts_dec) -.align 3 -SYM_FUNC_START(sm4_ce_cfb_enc) - /* input: - * x0: round key array, CTX - * x1: dst - * x2: src - * x3: iv (big endian, 128 bit) - * w4: nblocks - */ - SM4_PREPARE(x0) - - ld1 {RIV.16b}, [x3] - -.Lcfb_enc_loop_4x: - cmp w4, #4 - blt .Lcfb_enc_loop_1x - - sub w4, w4, #4 - - ld1 {v0.16b-v3.16b}, [x2], #64 - - rev32 v8.16b, RIV.16b - SM4_CRYPT_BLK_BE(v8) - eor v0.16b, v0.16b, v8.16b - - rev32 v8.16b, v0.16b - SM4_CRYPT_BLK_BE(v8) - eor v1.16b, v1.16b, v8.16b - - rev32 v8.16b, v1.16b - SM4_CRYPT_BLK_BE(v8) - eor v2.16b, v2.16b, v8.16b - - rev32 v8.16b, v2.16b - SM4_CRYPT_BLK_BE(v8) - eor v3.16b, v3.16b, v8.16b - - st1 {v0.16b-v3.16b}, [x1], #64 - mov RIV.16b, v3.16b - - cbz w4, .Lcfb_enc_end - b .Lcfb_enc_loop_4x - -.Lcfb_enc_loop_1x: - sub w4, w4, #1 - - ld1 {v0.16b}, [x2], #16 - - SM4_CRYPT_BLK(RIV) - eor RIV.16b, RIV.16b, v0.16b - - st1 {RIV.16b}, [x1], #16 - - cbnz w4, .Lcfb_enc_loop_1x - -.Lcfb_enc_end: - /* store new IV */ - st1 {RIV.16b}, [x3] - - ret -SYM_FUNC_END(sm4_ce_cfb_enc) - -.align 3 -SYM_FUNC_START(sm4_ce_cfb_dec) - /* input: - * x0: round key array, CTX - * x1: dst - * x2: src - * x3: iv (big endian, 128 bit) - * w4: nblocks - */ - SM4_PREPARE(x0) - - ld1 {RIV.16b}, [x3] - -.Lcfb_dec_loop_8x: - sub w4, w4, #8 - tbnz w4, #31, .Lcfb_dec_4x - - ld1 {v0.16b-v3.16b}, [x2], #64 - ld1 {v4.16b-v7.16b}, [x2], #64 - - rev32 v8.16b, RIV.16b - rev32 v9.16b, v0.16b - rev32 v10.16b, v1.16b - rev32 v11.16b, v2.16b - rev32 v12.16b, v3.16b - rev32 v13.16b, v4.16b - rev32 v14.16b, v5.16b - rev32 v15.16b, v6.16b - - SM4_CRYPT_BLK8_BE(v8, v9, v10, v11, v12, v13, v14, v15) - - mov RIV.16b, v7.16b - - eor v0.16b, v0.16b, v8.16b - eor v1.16b, v1.16b, v9.16b - eor v2.16b, v2.16b, v10.16b - eor v3.16b, v3.16b, v11.16b - eor v4.16b, v4.16b, v12.16b - eor v5.16b, v5.16b, v13.16b - eor v6.16b, v6.16b, v14.16b - eor v7.16b, v7.16b, v15.16b - - st1 {v0.16b-v3.16b}, [x1], #64 - st1 {v4.16b-v7.16b}, [x1], #64 - - cbz w4, .Lcfb_dec_end - b .Lcfb_dec_loop_8x - -.Lcfb_dec_4x: - add w4, w4, #8 - cmp w4, #4 - blt .Lcfb_dec_loop_1x - - sub w4, w4, #4 - - ld1 {v0.16b-v3.16b}, [x2], #64 - - rev32 v8.16b, RIV.16b - rev32 v9.16b, v0.16b - rev32 v10.16b, v1.16b - rev32 v11.16b, v2.16b - - SM4_CRYPT_BLK4_BE(v8, v9, v10, v11) - - mov RIV.16b, v3.16b - - eor v0.16b, v0.16b, v8.16b - eor v1.16b, v1.16b, v9.16b - eor v2.16b, v2.16b, v10.16b - eor v3.16b, v3.16b, v11.16b - - st1 {v0.16b-v3.16b}, [x1], #64 - - cbz w4, .Lcfb_dec_end - -.Lcfb_dec_loop_1x: - sub w4, w4, #1 - - ld1 {v0.16b}, [x2], #16 - - SM4_CRYPT_BLK(RIV) - - eor RIV.16b, RIV.16b, v0.16b - st1 {RIV.16b}, [x1], #16 - - mov RIV.16b, v0.16b - - cbnz w4, .Lcfb_dec_loop_1x - -.Lcfb_dec_end: - /* store new IV */ - st1 {RIV.16b}, [x3] - - ret -SYM_FUNC_END(sm4_ce_cfb_dec) - .align 3 SYM_FUNC_START(sm4_ce_ctr_enc) /* input: diff --git a/arch/arm64/crypto/sm4-ce-glue.c b/arch/arm64/crypto/sm4-ce-glue.c index 0a2d32ed3bde..43741bed874e 100644 --- a/arch/arm64/crypto/sm4-ce-glue.c +++ b/arch/arm64/crypto/sm4-ce-glue.c @@ -37,10 +37,6 @@ asmlinkage void sm4_ce_cbc_cts_enc(const u32 *rkey, u8 *dst, const u8 *src, u8 *iv, unsigned int nbytes); asmlinkage void 
sm4_ce_cbc_cts_dec(const u32 *rkey, u8 *dst, const u8 *src, u8 *iv, unsigned int nbytes); -asmlinkage void sm4_ce_cfb_enc(const u32 *rkey, u8 *dst, const u8 *src, - u8 *iv, unsigned int nblks); -asmlinkage void sm4_ce_cfb_dec(const u32 *rkey, u8 *dst, const u8 *src, - u8 *iv, unsigned int nblks); asmlinkage void sm4_ce_ctr_enc(const u32 *rkey, u8 *dst, const u8 *src, u8 *iv, unsigned int nblks); asmlinkage void sm4_ce_xts_enc(const u32 *rkey1, u8 *dst, const u8 *src, @@ -56,7 +52,6 @@ asmlinkage void sm4_ce_mac_update(const u32 *rkey_enc, u8 *digest, EXPORT_SYMBOL(sm4_ce_expand_key); EXPORT_SYMBOL(sm4_ce_crypt_block); EXPORT_SYMBOL(sm4_ce_cbc_enc); -EXPORT_SYMBOL(sm4_ce_cfb_enc); struct sm4_xts_ctx { struct sm4_ctx key1; @@ -280,90 +275,6 @@ static int sm4_cbc_cts_decrypt(struct skcipher_request *req) return sm4_cbc_cts_crypt(req, false); } -static int sm4_cfb_encrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); - struct skcipher_walk walk; - unsigned int nbytes; - int err; - - err = skcipher_walk_virt(&walk, req, false); - - while ((nbytes = walk.nbytes) > 0) { - const u8 *src = walk.src.virt.addr; - u8 *dst = walk.dst.virt.addr; - unsigned int nblks; - - kernel_neon_begin(); - - nblks = BYTES2BLKS(nbytes); - if (nblks) { - sm4_ce_cfb_enc(ctx->rkey_enc, dst, src, walk.iv, nblks); - dst += nblks * SM4_BLOCK_SIZE; - src += nblks * SM4_BLOCK_SIZE; - nbytes -= nblks * SM4_BLOCK_SIZE; - } - - /* tail */ - if (walk.nbytes == walk.total && nbytes > 0) { - u8 keystream[SM4_BLOCK_SIZE]; - - sm4_ce_crypt_block(ctx->rkey_enc, keystream, walk.iv); - crypto_xor_cpy(dst, src, keystream, nbytes); - nbytes = 0; - } - - kernel_neon_end(); - - err = skcipher_walk_done(&walk, nbytes); - } - - return err; -} - -static int sm4_cfb_decrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); - struct skcipher_walk walk; - unsigned int nbytes; - int err; - - err = skcipher_walk_virt(&walk, req, false); - - while ((nbytes = walk.nbytes) > 0) { - const u8 *src = walk.src.virt.addr; - u8 *dst = walk.dst.virt.addr; - unsigned int nblks; - - kernel_neon_begin(); - - nblks = BYTES2BLKS(nbytes); - if (nblks) { - sm4_ce_cfb_dec(ctx->rkey_enc, dst, src, walk.iv, nblks); - dst += nblks * SM4_BLOCK_SIZE; - src += nblks * SM4_BLOCK_SIZE; - nbytes -= nblks * SM4_BLOCK_SIZE; - } - - /* tail */ - if (walk.nbytes == walk.total && nbytes > 0) { - u8 keystream[SM4_BLOCK_SIZE]; - - sm4_ce_crypt_block(ctx->rkey_enc, keystream, walk.iv); - crypto_xor_cpy(dst, src, keystream, nbytes); - nbytes = 0; - } - - kernel_neon_end(); - - err = skcipher_walk_done(&walk, nbytes); - } - - return err; -} - static int sm4_ctr_crypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); @@ -542,22 +453,6 @@ static struct skcipher_alg sm4_algs[] = { .setkey = sm4_setkey, .encrypt = sm4_cbc_encrypt, .decrypt = sm4_cbc_decrypt, - }, { - .base = { - .cra_name = "cfb(sm4)", - .cra_driver_name = "cfb-sm4-ce", - .cra_priority = 400, - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct sm4_ctx), - .cra_module = THIS_MODULE, - }, - .min_keysize = SM4_KEY_SIZE, - .max_keysize = SM4_KEY_SIZE, - .ivsize = SM4_BLOCK_SIZE, - .chunksize = SM4_BLOCK_SIZE, - .setkey = sm4_setkey, - .encrypt = sm4_cfb_encrypt, - .decrypt = sm4_cfb_decrypt, }, { .base = { .cra_name = "ctr(sm4)", @@ -869,12 +764,11 @@ static void __exit sm4_exit(void) 
module_cpu_feature_match(SM4, sm4_init); module_exit(sm4_exit); -MODULE_DESCRIPTION("SM4 ECB/CBC/CFB/CTR/XTS using ARMv8 Crypto Extensions"); +MODULE_DESCRIPTION("SM4 ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions"); MODULE_ALIAS_CRYPTO("sm4-ce"); MODULE_ALIAS_CRYPTO("sm4"); MODULE_ALIAS_CRYPTO("ecb(sm4)"); MODULE_ALIAS_CRYPTO("cbc(sm4)"); -MODULE_ALIAS_CRYPTO("cfb(sm4)"); MODULE_ALIAS_CRYPTO("ctr(sm4)"); MODULE_ALIAS_CRYPTO("cts(cbc(sm4))"); MODULE_ALIAS_CRYPTO("xts(sm4)"); diff --git a/arch/arm64/crypto/sm4-ce.h b/arch/arm64/crypto/sm4-ce.h index 109c21b37590..1e235c4371eb 100644 --- a/arch/arm64/crypto/sm4-ce.h +++ b/arch/arm64/crypto/sm4-ce.h @@ -11,6 +11,3 @@ void sm4_ce_crypt_block(const u32 *rkey, u8 *dst, const u8 *src); void sm4_ce_cbc_enc(const u32 *rkey_enc, u8 *dst, const u8 *src, u8 *iv, unsigned int nblocks); - -void sm4_ce_cfb_enc(const u32 *rkey_enc, u8 *dst, const u8 *src, - u8 *iv, unsigned int nblocks); diff --git a/arch/arm64/crypto/sm4-neon-core.S b/arch/arm64/crypto/sm4-neon-core.S index f295b4b7d70a..734dc7193610 100644 --- a/arch/arm64/crypto/sm4-neon-core.S +++ b/arch/arm64/crypto/sm4-neon-core.S @@ -437,119 +437,6 @@ SYM_FUNC_START(sm4_neon_cbc_dec) ret SYM_FUNC_END(sm4_neon_cbc_dec) -.align 3 -SYM_FUNC_START(sm4_neon_cfb_dec) - /* input: - * x0: round key array, CTX - * x1: dst - * x2: src - * x3: iv (big endian, 128 bit) - * w4: nblocks - */ - SM4_PREPARE() - - ld1 {v0.16b}, [x3] - -.Lcfb_dec_loop_8x: - sub w4, w4, #8 - tbnz w4, #31, .Lcfb_dec_4x - - ld1 {v1.16b-v3.16b}, [x2], #48 - ld4 {v4.4s-v7.4s}, [x2] - - transpose_4x4(v0, v1, v2, v3) - - SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7) - - sub x2, x2, #48 - ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64 - ld1 {RTMP4.16b-RTMP7.16b}, [x2], #64 - - eor v0.16b, v0.16b, RTMP0.16b - eor v1.16b, v1.16b, RTMP1.16b - eor v2.16b, v2.16b, RTMP2.16b - eor v3.16b, v3.16b, RTMP3.16b - eor v4.16b, v4.16b, RTMP4.16b - eor v5.16b, v5.16b, RTMP5.16b - eor v6.16b, v6.16b, RTMP6.16b - eor v7.16b, v7.16b, RTMP7.16b - - st1 {v0.16b-v3.16b}, [x1], #64 - st1 {v4.16b-v7.16b}, [x1], #64 - - mov v0.16b, RTMP7.16b - - cbz w4, .Lcfb_dec_end - b .Lcfb_dec_loop_8x - -.Lcfb_dec_4x: - add w4, w4, #8 - cmp w4, #4 - blt .Lcfb_dec_tail - - sub w4, w4, #4 - - ld1 {v4.16b-v7.16b}, [x2], #64 - - rev32 v0.16b, v0.16b /* v0 is IV register */ - rev32 v1.16b, v4.16b - rev32 v2.16b, v5.16b - rev32 v3.16b, v6.16b - - transpose_4x4(v0, v1, v2, v3) - - SM4_CRYPT_BLK4_BE(v0, v1, v2, v3) - - eor v0.16b, v0.16b, v4.16b - eor v1.16b, v1.16b, v5.16b - eor v2.16b, v2.16b, v6.16b - eor v3.16b, v3.16b, v7.16b - - st1 {v0.16b-v3.16b}, [x1], #64 - - mov v0.16b, v7.16b - - cbz w4, .Lcfb_dec_end - -.Lcfb_dec_tail: - cmp w4, #2 - ld1 {v4.16b}, [x2], #16 - blt .Lcfb_dec_tail_load_done - ld1 {v5.16b}, [x2], #16 - beq .Lcfb_dec_tail_load_done - ld1 {v6.16b}, [x2], #16 - -.Lcfb_dec_tail_load_done: - rev32 v0.16b, v0.16b /* v0 is IV register */ - rev32 v1.16b, v4.16b - rev32 v2.16b, v5.16b - - transpose_4x4(v0, v1, v2, v3) - - SM4_CRYPT_BLK4_BE(v0, v1, v2, v3) - - cmp w4, #2 - eor v0.16b, v0.16b, v4.16b - st1 {v0.16b}, [x1], #16 - mov v0.16b, v4.16b - blt .Lcfb_dec_end - - eor v1.16b, v1.16b, v5.16b - st1 {v1.16b}, [x1], #16 - mov v0.16b, v5.16b - beq .Lcfb_dec_end - - eor v2.16b, v2.16b, v6.16b - st1 {v2.16b}, [x1], #16 - mov v0.16b, v6.16b - -.Lcfb_dec_end: - /* store new IV */ - st1 {v0.16b}, [x3] - - ret -SYM_FUNC_END(sm4_neon_cfb_dec) - .align 3 SYM_FUNC_START(sm4_neon_ctr_crypt) /* input: diff --git a/arch/arm64/crypto/sm4-neon-glue.c b/arch/arm64/crypto/sm4-neon-glue.c index 
7b19accf5c03..e3500aca2d18 100644 --- a/arch/arm64/crypto/sm4-neon-glue.c +++ b/arch/arm64/crypto/sm4-neon-glue.c @@ -22,8 +22,6 @@ asmlinkage void sm4_neon_crypt(const u32 *rkey, u8 *dst, const u8 *src, unsigned int nblocks); asmlinkage void sm4_neon_cbc_dec(const u32 *rkey_dec, u8 *dst, const u8 *src, u8 *iv, unsigned int nblocks); -asmlinkage void sm4_neon_cfb_dec(const u32 *rkey_enc, u8 *dst, const u8 *src, - u8 *iv, unsigned int nblocks); asmlinkage void sm4_neon_ctr_crypt(const u32 *rkey_enc, u8 *dst, const u8 *src, u8 *iv, unsigned int nblocks); @@ -142,90 +140,6 @@ static int sm4_cbc_decrypt(struct skcipher_request *req) return err; } -static int sm4_cfb_encrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); - struct skcipher_walk walk; - unsigned int nbytes; - int err; - - err = skcipher_walk_virt(&walk, req, false); - - while ((nbytes = walk.nbytes) > 0) { - u8 keystream[SM4_BLOCK_SIZE]; - const u8 *iv = walk.iv; - const u8 *src = walk.src.virt.addr; - u8 *dst = walk.dst.virt.addr; - - while (nbytes >= SM4_BLOCK_SIZE) { - sm4_crypt_block(ctx->rkey_enc, keystream, iv); - crypto_xor_cpy(dst, src, keystream, SM4_BLOCK_SIZE); - iv = dst; - src += SM4_BLOCK_SIZE; - dst += SM4_BLOCK_SIZE; - nbytes -= SM4_BLOCK_SIZE; - } - if (iv != walk.iv) - memcpy(walk.iv, iv, SM4_BLOCK_SIZE); - - /* tail */ - if (walk.nbytes == walk.total && nbytes > 0) { - sm4_crypt_block(ctx->rkey_enc, keystream, walk.iv); - crypto_xor_cpy(dst, src, keystream, nbytes); - nbytes = 0; - } - - err = skcipher_walk_done(&walk, nbytes); - } - - return err; -} - -static int sm4_cfb_decrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); - struct skcipher_walk walk; - unsigned int nbytes; - int err; - - err = skcipher_walk_virt(&walk, req, false); - - while ((nbytes = walk.nbytes) > 0) { - const u8 *src = walk.src.virt.addr; - u8 *dst = walk.dst.virt.addr; - unsigned int nblocks; - - nblocks = nbytes / SM4_BLOCK_SIZE; - if (nblocks) { - kernel_neon_begin(); - - sm4_neon_cfb_dec(ctx->rkey_enc, dst, src, - walk.iv, nblocks); - - kernel_neon_end(); - - dst += nblocks * SM4_BLOCK_SIZE; - src += nblocks * SM4_BLOCK_SIZE; - nbytes -= nblocks * SM4_BLOCK_SIZE; - } - - /* tail */ - if (walk.nbytes == walk.total && nbytes > 0) { - u8 keystream[SM4_BLOCK_SIZE]; - - sm4_crypt_block(ctx->rkey_enc, keystream, walk.iv); - crypto_xor_cpy(dst, src, keystream, nbytes); - nbytes = 0; - } - - err = skcipher_walk_done(&walk, nbytes); - } - - return err; -} - static int sm4_ctr_crypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); @@ -301,22 +215,6 @@ static struct skcipher_alg sm4_algs[] = { .setkey = sm4_setkey, .encrypt = sm4_cbc_encrypt, .decrypt = sm4_cbc_decrypt, - }, { - .base = { - .cra_name = "cfb(sm4)", - .cra_driver_name = "cfb-sm4-neon", - .cra_priority = 200, - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct sm4_ctx), - .cra_module = THIS_MODULE, - }, - .min_keysize = SM4_KEY_SIZE, - .max_keysize = SM4_KEY_SIZE, - .ivsize = SM4_BLOCK_SIZE, - .chunksize = SM4_BLOCK_SIZE, - .setkey = sm4_setkey, - .encrypt = sm4_cfb_encrypt, - .decrypt = sm4_cfb_decrypt, }, { .base = { .cra_name = "ctr(sm4)", @@ -349,12 +247,11 @@ static void __exit sm4_exit(void) module_init(sm4_init); module_exit(sm4_exit); -MODULE_DESCRIPTION("SM4 ECB/CBC/CFB/CTR using ARMv8 NEON"); +MODULE_DESCRIPTION("SM4 ECB/CBC/CTR using 
ARMv8 NEON"); MODULE_ALIAS_CRYPTO("sm4-neon"); MODULE_ALIAS_CRYPTO("sm4"); MODULE_ALIAS_CRYPTO("ecb(sm4)"); MODULE_ALIAS_CRYPTO("cbc(sm4)"); -MODULE_ALIAS_CRYPTO("cfb(sm4)"); MODULE_ALIAS_CRYPTO("ctr(sm4)"); MODULE_AUTHOR("Tianjia Zhang "); MODULE_LICENSE("GPL v2"); -- cgit v1.2.3 From 05bd1e2a78a453910104b0fe4aa771d08cbeccf1 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 16 Sep 2023 17:16:52 +0800 Subject: crypto: x86/sm4 - Remove cfb(sm4) Remove the unused CFB implementation. Signed-off-by: Herbert Xu --- arch/x86/crypto/Kconfig | 8 +- arch/x86/crypto/sm4-aesni-avx-asm_64.S | 52 ------------- arch/x86/crypto/sm4-aesni-avx2-asm_64.S | 55 -------------- arch/x86/crypto/sm4-avx.h | 4 - arch/x86/crypto/sm4_aesni_avx2_glue.c | 26 ------- arch/x86/crypto/sm4_aesni_avx_glue.c | 130 -------------------------------- 6 files changed, 4 insertions(+), 271 deletions(-) (limited to 'arch') diff --git a/arch/x86/crypto/Kconfig b/arch/x86/crypto/Kconfig index 9bbfd01cfa2f..c9e59589a1ce 100644 --- a/arch/x86/crypto/Kconfig +++ b/arch/x86/crypto/Kconfig @@ -189,7 +189,7 @@ config CRYPTO_SERPENT_AVX2_X86_64 Processes 16 blocks in parallel. config CRYPTO_SM4_AESNI_AVX_X86_64 - tristate "Ciphers: SM4 with modes: ECB, CBC, CFB, CTR (AES-NI/AVX)" + tristate "Ciphers: SM4 with modes: ECB, CBC, CTR (AES-NI/AVX)" depends on X86 && 64BIT select CRYPTO_SKCIPHER select CRYPTO_SIMD @@ -197,7 +197,7 @@ config CRYPTO_SM4_AESNI_AVX_X86_64 select CRYPTO_SM4 help Length-preserving ciphers: SM4 cipher algorithms - (OSCCA GB/T 32907-2016) with ECB, CBC, CFB, and CTR modes + (OSCCA GB/T 32907-2016) with ECB, CBC, and CTR modes Architecture: x86_64 using: - AES-NI (AES New Instructions) @@ -210,7 +210,7 @@ config CRYPTO_SM4_AESNI_AVX_X86_64 If unsure, say N. config CRYPTO_SM4_AESNI_AVX2_X86_64 - tristate "Ciphers: SM4 with modes: ECB, CBC, CFB, CTR (AES-NI/AVX2)" + tristate "Ciphers: SM4 with modes: ECB, CBC, CTR (AES-NI/AVX2)" depends on X86 && 64BIT select CRYPTO_SKCIPHER select CRYPTO_SIMD @@ -219,7 +219,7 @@ config CRYPTO_SM4_AESNI_AVX2_X86_64 select CRYPTO_SM4_AESNI_AVX_X86_64 help Length-preserving ciphers: SM4 cipher algorithms - (OSCCA GB/T 32907-2016) with ECB, CBC, CFB, and CTR modes + (OSCCA GB/T 32907-2016) with ECB, CBC, and CTR modes Architecture: x86_64 using: - AES-NI (AES New Instructions) diff --git a/arch/x86/crypto/sm4-aesni-avx-asm_64.S b/arch/x86/crypto/sm4-aesni-avx-asm_64.S index e2668d2fe6ce..2bf611eaa191 100644 --- a/arch/x86/crypto/sm4-aesni-avx-asm_64.S +++ b/arch/x86/crypto/sm4-aesni-avx-asm_64.S @@ -534,55 +534,3 @@ SYM_TYPED_FUNC_START(sm4_aesni_avx_cbc_dec_blk8) FRAME_END RET; SYM_FUNC_END(sm4_aesni_avx_cbc_dec_blk8) - -/* - * void sm4_aesni_avx_cfb_dec_blk8(const u32 *rk, u8 *dst, - * const u8 *src, u8 *iv) - */ -SYM_TYPED_FUNC_START(sm4_aesni_avx_cfb_dec_blk8) - /* input: - * %rdi: round key array, CTX - * %rsi: dst (8 blocks) - * %rdx: src (8 blocks) - * %rcx: iv - */ - FRAME_BEGIN - - /* Load input */ - vmovdqu (%rcx), RA0; - vmovdqu 0 * 16(%rdx), RA1; - vmovdqu 1 * 16(%rdx), RA2; - vmovdqu 2 * 16(%rdx), RA3; - vmovdqu 3 * 16(%rdx), RB0; - vmovdqu 4 * 16(%rdx), RB1; - vmovdqu 5 * 16(%rdx), RB2; - vmovdqu 6 * 16(%rdx), RB3; - - /* Update IV */ - vmovdqu 7 * 16(%rdx), RNOT; - vmovdqu RNOT, (%rcx); - - call __sm4_crypt_blk8; - - vpxor (0 * 16)(%rdx), RA0, RA0; - vpxor (1 * 16)(%rdx), RA1, RA1; - vpxor (2 * 16)(%rdx), RA2, RA2; - vpxor (3 * 16)(%rdx), RA3, RA3; - vpxor (4 * 16)(%rdx), RB0, RB0; - vpxor (5 * 16)(%rdx), RB1, RB1; - vpxor (6 * 16)(%rdx), RB2, RB2; - vpxor (7 * 16)(%rdx), 
RB3, RB3; - - vmovdqu RA0, (0 * 16)(%rsi); - vmovdqu RA1, (1 * 16)(%rsi); - vmovdqu RA2, (2 * 16)(%rsi); - vmovdqu RA3, (3 * 16)(%rsi); - vmovdqu RB0, (4 * 16)(%rsi); - vmovdqu RB1, (5 * 16)(%rsi); - vmovdqu RB2, (6 * 16)(%rsi); - vmovdqu RB3, (7 * 16)(%rsi); - - vzeroall; - FRAME_END - RET; -SYM_FUNC_END(sm4_aesni_avx_cfb_dec_blk8) diff --git a/arch/x86/crypto/sm4-aesni-avx2-asm_64.S b/arch/x86/crypto/sm4-aesni-avx2-asm_64.S index 98ede9459287..9ff5ba075591 100644 --- a/arch/x86/crypto/sm4-aesni-avx2-asm_64.S +++ b/arch/x86/crypto/sm4-aesni-avx2-asm_64.S @@ -439,58 +439,3 @@ SYM_TYPED_FUNC_START(sm4_aesni_avx2_cbc_dec_blk16) FRAME_END RET; SYM_FUNC_END(sm4_aesni_avx2_cbc_dec_blk16) - -/* - * void sm4_aesni_avx2_cfb_dec_blk16(const u32 *rk, u8 *dst, - * const u8 *src, u8 *iv) - */ -SYM_TYPED_FUNC_START(sm4_aesni_avx2_cfb_dec_blk16) - /* input: - * %rdi: round key array, CTX - * %rsi: dst (16 blocks) - * %rdx: src (16 blocks) - * %rcx: iv - */ - FRAME_BEGIN - - vzeroupper; - - /* Load input */ - vmovdqu (%rcx), RNOTx; - vinserti128 $1, (%rdx), RNOT, RA0; - vmovdqu (0 * 32 + 16)(%rdx), RA1; - vmovdqu (1 * 32 + 16)(%rdx), RA2; - vmovdqu (2 * 32 + 16)(%rdx), RA3; - vmovdqu (3 * 32 + 16)(%rdx), RB0; - vmovdqu (4 * 32 + 16)(%rdx), RB1; - vmovdqu (5 * 32 + 16)(%rdx), RB2; - vmovdqu (6 * 32 + 16)(%rdx), RB3; - - /* Update IV */ - vmovdqu (7 * 32 + 16)(%rdx), RNOTx; - vmovdqu RNOTx, (%rcx); - - call __sm4_crypt_blk16; - - vpxor (0 * 32)(%rdx), RA0, RA0; - vpxor (1 * 32)(%rdx), RA1, RA1; - vpxor (2 * 32)(%rdx), RA2, RA2; - vpxor (3 * 32)(%rdx), RA3, RA3; - vpxor (4 * 32)(%rdx), RB0, RB0; - vpxor (5 * 32)(%rdx), RB1, RB1; - vpxor (6 * 32)(%rdx), RB2, RB2; - vpxor (7 * 32)(%rdx), RB3, RB3; - - vmovdqu RA0, (0 * 32)(%rsi); - vmovdqu RA1, (1 * 32)(%rsi); - vmovdqu RA2, (2 * 32)(%rsi); - vmovdqu RA3, (3 * 32)(%rsi); - vmovdqu RB0, (4 * 32)(%rsi); - vmovdqu RB1, (5 * 32)(%rsi); - vmovdqu RB2, (6 * 32)(%rsi); - vmovdqu RB3, (7 * 32)(%rsi); - - vzeroall; - FRAME_END - RET; -SYM_FUNC_END(sm4_aesni_avx2_cfb_dec_blk16) diff --git a/arch/x86/crypto/sm4-avx.h b/arch/x86/crypto/sm4-avx.h index 1bceab7516aa..b5b5e67e40ed 100644 --- a/arch/x86/crypto/sm4-avx.h +++ b/arch/x86/crypto/sm4-avx.h @@ -14,10 +14,6 @@ int sm4_cbc_encrypt(struct skcipher_request *req); int sm4_avx_cbc_decrypt(struct skcipher_request *req, unsigned int bsize, sm4_crypt_func func); -int sm4_cfb_encrypt(struct skcipher_request *req); -int sm4_avx_cfb_decrypt(struct skcipher_request *req, - unsigned int bsize, sm4_crypt_func func); - int sm4_avx_ctr_crypt(struct skcipher_request *req, unsigned int bsize, sm4_crypt_func func); diff --git a/arch/x86/crypto/sm4_aesni_avx2_glue.c b/arch/x86/crypto/sm4_aesni_avx2_glue.c index 84bc718f49a3..1148fd4cd57f 100644 --- a/arch/x86/crypto/sm4_aesni_avx2_glue.c +++ b/arch/x86/crypto/sm4_aesni_avx2_glue.c @@ -23,8 +23,6 @@ asmlinkage void sm4_aesni_avx2_ctr_enc_blk16(const u32 *rk, u8 *dst, const u8 *src, u8 *iv); asmlinkage void sm4_aesni_avx2_cbc_dec_blk16(const u32 *rk, u8 *dst, const u8 *src, u8 *iv); -asmlinkage void sm4_aesni_avx2_cfb_dec_blk16(const u32 *rk, u8 *dst, - const u8 *src, u8 *iv); static int sm4_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int key_len) @@ -41,12 +39,6 @@ static int cbc_decrypt(struct skcipher_request *req) } -static int cfb_decrypt(struct skcipher_request *req) -{ - return sm4_avx_cfb_decrypt(req, SM4_CRYPT16_BLOCK_SIZE, - sm4_aesni_avx2_cfb_dec_blk16); -} - static int ctr_crypt(struct skcipher_request *req) { return sm4_avx_ctr_crypt(req, 
SM4_CRYPT16_BLOCK_SIZE, @@ -87,24 +79,6 @@ static struct skcipher_alg sm4_aesni_avx2_skciphers[] = { .setkey = sm4_skcipher_setkey, .encrypt = sm4_cbc_encrypt, .decrypt = cbc_decrypt, - }, { - .base = { - .cra_name = "__cfb(sm4)", - .cra_driver_name = "__cfb-sm4-aesni-avx2", - .cra_priority = 500, - .cra_flags = CRYPTO_ALG_INTERNAL, - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct sm4_ctx), - .cra_module = THIS_MODULE, - }, - .min_keysize = SM4_KEY_SIZE, - .max_keysize = SM4_KEY_SIZE, - .ivsize = SM4_BLOCK_SIZE, - .chunksize = SM4_BLOCK_SIZE, - .walksize = 16 * SM4_BLOCK_SIZE, - .setkey = sm4_skcipher_setkey, - .encrypt = sm4_cfb_encrypt, - .decrypt = cfb_decrypt, }, { .base = { .cra_name = "__ctr(sm4)", diff --git a/arch/x86/crypto/sm4_aesni_avx_glue.c b/arch/x86/crypto/sm4_aesni_avx_glue.c index 7800f77d68ad..85b4ca78b47b 100644 --- a/arch/x86/crypto/sm4_aesni_avx_glue.c +++ b/arch/x86/crypto/sm4_aesni_avx_glue.c @@ -27,8 +27,6 @@ asmlinkage void sm4_aesni_avx_ctr_enc_blk8(const u32 *rk, u8 *dst, const u8 *src, u8 *iv); asmlinkage void sm4_aesni_avx_cbc_dec_blk8(const u32 *rk, u8 *dst, const u8 *src, u8 *iv); -asmlinkage void sm4_aesni_avx_cfb_dec_blk8(const u32 *rk, u8 *dst, - const u8 *src, u8 *iv); static int sm4_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int key_len) @@ -188,116 +186,6 @@ static int cbc_decrypt(struct skcipher_request *req) sm4_aesni_avx_cbc_dec_blk8); } -int sm4_cfb_encrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); - struct skcipher_walk walk; - unsigned int nbytes; - int err; - - err = skcipher_walk_virt(&walk, req, false); - - while ((nbytes = walk.nbytes) > 0) { - u8 keystream[SM4_BLOCK_SIZE]; - const u8 *iv = walk.iv; - const u8 *src = walk.src.virt.addr; - u8 *dst = walk.dst.virt.addr; - - while (nbytes >= SM4_BLOCK_SIZE) { - sm4_crypt_block(ctx->rkey_enc, keystream, iv); - crypto_xor_cpy(dst, src, keystream, SM4_BLOCK_SIZE); - iv = dst; - src += SM4_BLOCK_SIZE; - dst += SM4_BLOCK_SIZE; - nbytes -= SM4_BLOCK_SIZE; - } - if (iv != walk.iv) - memcpy(walk.iv, iv, SM4_BLOCK_SIZE); - - /* tail */ - if (walk.nbytes == walk.total && nbytes > 0) { - sm4_crypt_block(ctx->rkey_enc, keystream, walk.iv); - crypto_xor_cpy(dst, src, keystream, nbytes); - nbytes = 0; - } - - err = skcipher_walk_done(&walk, nbytes); - } - - return err; -} -EXPORT_SYMBOL_GPL(sm4_cfb_encrypt); - -int sm4_avx_cfb_decrypt(struct skcipher_request *req, - unsigned int bsize, sm4_crypt_func func) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); - struct skcipher_walk walk; - unsigned int nbytes; - int err; - - err = skcipher_walk_virt(&walk, req, false); - - while ((nbytes = walk.nbytes) > 0) { - const u8 *src = walk.src.virt.addr; - u8 *dst = walk.dst.virt.addr; - - kernel_fpu_begin(); - - while (nbytes >= bsize) { - func(ctx->rkey_enc, dst, src, walk.iv); - dst += bsize; - src += bsize; - nbytes -= bsize; - } - - while (nbytes >= SM4_BLOCK_SIZE) { - u8 keystream[SM4_BLOCK_SIZE * 8]; - unsigned int nblocks = min(nbytes >> 4, 8u); - - memcpy(keystream, walk.iv, SM4_BLOCK_SIZE); - if (nblocks > 1) - memcpy(&keystream[SM4_BLOCK_SIZE], src, - (nblocks - 1) * SM4_BLOCK_SIZE); - memcpy(walk.iv, src + (nblocks - 1) * SM4_BLOCK_SIZE, - SM4_BLOCK_SIZE); - - sm4_aesni_avx_crypt8(ctx->rkey_enc, keystream, - keystream, nblocks); - - crypto_xor_cpy(dst, src, keystream, - nblocks * SM4_BLOCK_SIZE); - dst += nblocks * 
SM4_BLOCK_SIZE; - src += nblocks * SM4_BLOCK_SIZE; - nbytes -= nblocks * SM4_BLOCK_SIZE; - } - - kernel_fpu_end(); - - /* tail */ - if (walk.nbytes == walk.total && nbytes > 0) { - u8 keystream[SM4_BLOCK_SIZE]; - - sm4_crypt_block(ctx->rkey_enc, keystream, walk.iv); - crypto_xor_cpy(dst, src, keystream, nbytes); - nbytes = 0; - } - - err = skcipher_walk_done(&walk, nbytes); - } - - return err; -} -EXPORT_SYMBOL_GPL(sm4_avx_cfb_decrypt); - -static int cfb_decrypt(struct skcipher_request *req) -{ - return sm4_avx_cfb_decrypt(req, SM4_CRYPT8_BLOCK_SIZE, - sm4_aesni_avx_cfb_dec_blk8); -} - int sm4_avx_ctr_crypt(struct skcipher_request *req, unsigned int bsize, sm4_crypt_func func) { @@ -406,24 +294,6 @@ static struct skcipher_alg sm4_aesni_avx_skciphers[] = { .setkey = sm4_skcipher_setkey, .encrypt = sm4_cbc_encrypt, .decrypt = cbc_decrypt, - }, { - .base = { - .cra_name = "__cfb(sm4)", - .cra_driver_name = "__cfb-sm4-aesni-avx", - .cra_priority = 400, - .cra_flags = CRYPTO_ALG_INTERNAL, - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct sm4_ctx), - .cra_module = THIS_MODULE, - }, - .min_keysize = SM4_KEY_SIZE, - .max_keysize = SM4_KEY_SIZE, - .ivsize = SM4_BLOCK_SIZE, - .chunksize = SM4_BLOCK_SIZE, - .walksize = 8 * SM4_BLOCK_SIZE, - .setkey = sm4_skcipher_setkey, - .encrypt = sm4_cfb_encrypt, - .decrypt = cfb_decrypt, }, { .base = { .cra_name = "__ctr(sm4)", -- cgit v1.2.3
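
A note on the autoload mechanism the two SHA patches at the top of this series rely on: an x86 module advertises the CPU features it can use in an x86_cpu_id table and exports that table with MODULE_DEVICE_TABLE(), which generates module aliases that udev matches against the running CPU's feature flags. The skeleton below is a minimal sketch of the pattern, not code from the tree; the table contents mirror the patched sha1 glue, while example_mod_init() is a hypothetical placeholder.

#include <linux/module.h>
#include <asm/cpu_device_id.h>

/* Each entry becomes a module alias keyed on a CPU feature flag, so udev
 * autoloads the module when the running CPU advertises that feature. */
static const struct x86_cpu_id module_cpu_ids[] = {
	X86_MATCH_FEATURE(X86_FEATURE_SHA_NI, NULL),
	X86_MATCH_FEATURE(X86_FEATURE_AVX2, NULL),
	X86_MATCH_FEATURE(X86_FEATURE_AVX, NULL),
	X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);

static int __init example_mod_init(void)
{
	/* Autoloading is only a hint; init must still verify the features. */
	if (!x86_match_cpu(module_cpu_ids))
		return -ENODEV;
	/* ... register the algorithm implementations here ... */
	return 0;
}
module_init(example_mod_init);

MODULE_LICENSE("GPL");

As the commit messages note, the SHA_NI entry changes nothing in practice, since every SHA-NI CPU also advertises SSSE3; it is added so the table matches the implementations the module actually contains.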
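On the -Wstringop-overflow fix for p10-aes-gcm: in recent GCC, an array bound in a parameter declaration such as "unsigned char htable[256]" is treated as a statement about how many bytes the callee may access, and callers passing a smaller region are diagnosed. Below is a standalone sketch with made-up names (only the shape matches the kernel code) that should reproduce the diagnostic on a GCC new enough to do this analysis:

/* Build with something like: gcc -O2 -Werror=stringop-overflow -c example.c */
struct hash_ctx {
	unsigned char H[16];       /* subkey */
	unsigned char Htable[256]; /* table entries start at offset 32 */
};

/* The [256] bound tells GCC the callee may access 256 bytes ... */
void init_htable_sized(unsigned char htable[256], unsigned char Xi[16]);

/* ... while an unsized parameter carries no such assumption. */
void init_htable(unsigned char htable[], unsigned char Xi[]);

void example(struct hash_ctx *hash)
{
	/* Only 224 bytes remain past offset 32, so this call warns. */
	init_htable_sized(hash->Htable + 32, hash->H);

	/* The patch's approach: drop the bound and the warning goes away. */
	init_htable(hash->Htable + 32, hash->H);
}

The bound is purely advisory in C (the parameter is still just a pointer), which is why removing it changes no generated code.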
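The s390 CTR fix is an instance of a common bounce-buffer pattern for hardware primitives that only operate on whole cipher blocks: pad the partial final block into a stack buffer, run the block operation on the buffer, and copy back only the bytes that are actually valid. Here is a hedged sketch of that pattern, with a hypothetical ctr_crypt_block() standing in for the cpacf_kmctr() CPACF call:

#include <string.h>

#define AES_BLOCK_SIZE 16

/* Stand-in for the hardware instruction, which always reads and writes
 * full AES blocks regardless of how much plaintext is left. */
void ctr_crypt_block(const void *key, unsigned char *out,
		     const unsigned char *in, unsigned char *iv);

/* Process a trailing partial block of nbytes (< AES_BLOCK_SIZE) without
 * reading past the end of src -- the overread the patch fixes. */
static void ctr_final(const void *key, unsigned char *dst,
		      const unsigned char *src, unsigned int nbytes,
		      unsigned char *iv)
{
	unsigned char buf[AES_BLOCK_SIZE];

	memset(buf, 0, AES_BLOCK_SIZE);
	memcpy(buf, src, nbytes);           /* read only what exists */
	ctr_crypt_block(key, buf, buf, iv); /* full block, in place */
	memcpy(dst, buf, nbytes);           /* emit only valid bytes */
}

Since CTR mode XORs a keystream into the data, the zero padding only affects the discarded tail of the output block.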
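Finally, some context for the cfb(sm4) removals: in CFB mode, C[i] = P[i] XOR E_K(C[i-1]) with C[-1] = IV, so encryption is inherently serial (each ciphertext block becomes the next block's cipher input) while decryption can process many blocks in parallel -- which is why the deleted glue code computed the encrypt path one block at a time in C but carried multi-block SIMD routines only for decryption. A sketch of that serial encrypt loop, with a hypothetical block_encrypt() standing in for sm4_crypt_block():

#include <string.h>

#define BLOCK_SIZE 16

/* Hypothetical single-block cipher call (sm4_crypt_block in the glue). */
void block_encrypt(const unsigned int *rkey, unsigned char *dst,
		   const unsigned char *src);

/* CFB encryption: the dependency on the previous ciphertext block is what
 * rules out a parallel implementation of this direction. */
static void cfb_encrypt(const unsigned int *rkey, unsigned char *dst,
			const unsigned char *src, unsigned char *iv,
			unsigned int nblocks)
{
	unsigned char keystream[BLOCK_SIZE];
	unsigned int i;

	while (nblocks--) {
		block_encrypt(rkey, keystream, iv);
		for (i = 0; i < BLOCK_SIZE; i++)
			dst[i] = src[i] ^ keystream[i];
		memcpy(iv, dst, BLOCK_SIZE); /* ciphertext feeds next block */
		src += BLOCK_SIZE;
		dst += BLOCK_SIZE;
	}
}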