author     Linus Torvalds <torvalds@linux-foundation.org>  2020-08-03 20:40:14 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-08-03 20:40:14 +0300
commit     ab5c60b79ab6cc50b39bbb21b2f9fb55af900b84 (patch)
tree       71fa895fbf01e3b88f26cf257d9105f9d286b631 /crypto
parent     5577416c39652d395a6045677f4f598564aba1cf (diff)
parent     3cbfe80737c18ac6e635421ab676716a393d3074 (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "API:
   - Add support for allocating transforms on a specific NUMA Node
   - Introduce the flag CRYPTO_ALG_ALLOCATES_MEMORY for storage users

  Algorithms:
   - Drop PMULL based ghash on arm64
   - Fixes for building with clang on x86
   - Add sha256 helper that does the digest in one go
   - Add SP800-56A rev 3 validation checks to dh

  Drivers:
   - Permit users to specify NUMA node in hisilicon/zip
   - Add support for i.MX6 in imx-rngc
   - Add sa2ul crypto driver
   - Add BA431 hwrng driver
   - Add Ingenic JZ4780 and X1000 hwrng driver
   - Spread IRQ affinity in inside-secure and marvell/cesa"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (157 commits)
  crypto: sa2ul - Fix inconsistent IS_ERR and PTR_ERR
  hwrng: core - remove redundant initialization of variable ret
  crypto: x86/curve25519 - Remove unused carry variables
  crypto: ingenic - Add hardware RNG for Ingenic JZ4780 and X1000
  dt-bindings: RNG: Add Ingenic RNG bindings.
  crypto: caam/qi2 - add module alias
  crypto: caam - add more RNG hw error codes
  crypto: caam/jr - remove incorrect reference to caam_jr_register()
  crypto: caam - silence .setkey in case of bad key length
  crypto: caam/qi2 - create ahash shared descriptors only once
  crypto: caam/qi2 - fix error reporting for caam_hash_alloc
  crypto: caam - remove deadcode on 32-bit platforms
  crypto: ccp - use generic power management
  crypto: xts - Replace memcpy() invocation with simple assignment
  crypto: marvell/cesa - irq balance
  crypto: inside-secure - irq balance
  crypto: ecc - SP800-56A rev 3 local public key validation
  crypto: dh - SP800-56A rev 3 local public key validation
  crypto: dh - check validity of Z before export
  lib/mpi: Add mpi_sub_ui()
  ...
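To illustrate the new NUMA-aware allocation entry points mentioned above, here is a minimal sketch (hypothetical: only crypto_alloc_acomp_node(), added by this series, is real API; the helper, the device handling and the choice of "deflate" are invented for the example):

#include <crypto/acompress.h>
#include <linux/device.h>
#include <linux/numa.h>

/* Allocate a "deflate" acomp transform near the given device's memory node. */
static struct crypto_acomp *example_alloc_acomp(struct device *dev)
{
	int node = dev ? dev_to_node(dev) : NUMA_NO_NODE;

	/* The extra argument is new: it selects the NUMA node used for the
	 * transform's allocations; NUMA_NO_NODE keeps the old behaviour.
	 * Returns an ERR_PTR() on failure, like crypto_alloc_acomp(). */
	return crypto_alloc_acomp_node("deflate", 0, 0, node);
}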
Diffstat (limited to 'crypto')
-rw-r--r--  crypto/Kconfig  46
-rw-r--r--  crypto/acompress.c  8
-rw-r--r--  crypto/adiantum.c  14
-rw-r--r--  crypto/af_alg.c  11
-rw-r--r--  crypto/algapi.c  21
-rw-r--r--  crypto/algif_aead.c  4
-rw-r--r--  crypto/algif_skcipher.c  4
-rw-r--r--  crypto/api.c  24
-rw-r--r--  crypto/authenc.c  14
-rw-r--r--  crypto/authencesn.c  14
-rw-r--r--  crypto/blake2b_generic.c  2
-rw-r--r--  crypto/camellia_generic.c  2
-rw-r--r--  crypto/ccm.c  33
-rw-r--r--  crypto/chacha20poly1305.c  14
-rw-r--r--  crypto/cmac.c  5
-rw-r--r--  crypto/cryptd.c  59
-rw-r--r--  crypto/ctr.c  17
-rw-r--r--  crypto/cts.c  13
-rw-r--r--  crypto/dh.c  38
-rw-r--r--  crypto/ecc.c  44
-rw-r--r--  crypto/ecc.h  14
-rw-r--r--  crypto/echainiv.c  2
-rw-r--r--  crypto/essiv.c  11
-rw-r--r--  crypto/gcm.c  40
-rw-r--r--  crypto/geniv.c  19
-rw-r--r--  crypto/hmac.c  5
-rw-r--r--  crypto/internal.h  23
-rw-r--r--  crypto/jitterentropy.c  4
-rw-r--r--  crypto/lrw.c  134
-rw-r--r--  crypto/pcrypt.c  31
-rw-r--r--  crypto/rsa-pkcs1pad.c  13
-rw-r--r--  crypto/salsa20_generic.c  4
-rw-r--r--  crypto/seqiv.c  18
-rw-r--r--  crypto/sha3_generic.c  2
-rw-r--r--  crypto/simd.c  6
-rw-r--r--  crypto/skcipher.c  13
-rw-r--r--  crypto/testmgr.h  10
-rw-r--r--  crypto/vmac.c  5
-rw-r--r--  crypto/xcbc.c  5
-rw-r--r--  crypto/xts.c  154
40 files changed, 456 insertions, 444 deletions
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 091c0a0bbf26..1b57419fa2e7 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -548,7 +548,7 @@ config CRYPTO_XCBC
select CRYPTO_MANAGER
help
XCBC: Keyed-Hashing with encryption algorithm
- http://www.ietf.org/rfc/rfc3566.txt
+ https://www.ietf.org/rfc/rfc3566.txt
http://csrc.nist.gov/encryption/modes/proposedmodes/
xcbc-mac/xcbc-mac-spec.pdf
@@ -561,7 +561,7 @@ config CRYPTO_VMAC
very high speed on 64-bit architectures.
See also:
- <http://fastcrypto.org/vmac>
+ <https://fastcrypto.org/vmac>
comment "Digest"
@@ -816,7 +816,7 @@ config CRYPTO_RMD128
RIPEMD-160 should be used.
Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
- See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
+ See <https://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
config CRYPTO_RMD160
tristate "RIPEMD-160 digest algorithm"
@@ -833,7 +833,7 @@ config CRYPTO_RMD160
against RIPEMD-160.
Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
- See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
+ See <https://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
config CRYPTO_RMD256
tristate "RIPEMD-256 digest algorithm"
@@ -845,7 +845,7 @@ config CRYPTO_RMD256
(than RIPEMD-128).
Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
- See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
+ See <https://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
config CRYPTO_RMD320
tristate "RIPEMD-320 digest algorithm"
@@ -857,7 +857,7 @@ config CRYPTO_RMD320
(than RIPEMD-160).
Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
- See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
+ See <https://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
config CRYPTO_SHA1
tristate "SHA1 digest algorithm"
@@ -1045,7 +1045,7 @@ config CRYPTO_TGR192
Tiger was developed by Ross Anderson and Eli Biham.
See also:
- <http://www.cs.technion.ac.il/~biham/Reports/Tiger/>.
+ <https://www.cs.technion.ac.il/~biham/Reports/Tiger/>.
config CRYPTO_WP512
tristate "Whirlpool digest algorithms"
@@ -1221,7 +1221,7 @@ config CRYPTO_BLOWFISH
designed for use on "large microprocessors".
See also:
- <http://www.schneier.com/blowfish.html>
+ <https://www.schneier.com/blowfish.html>
config CRYPTO_BLOWFISH_COMMON
tristate
@@ -1230,7 +1230,7 @@ config CRYPTO_BLOWFISH_COMMON
generic c and the assembler implementations.
See also:
- <http://www.schneier.com/blowfish.html>
+ <https://www.schneier.com/blowfish.html>
config CRYPTO_BLOWFISH_X86_64
tristate "Blowfish cipher algorithm (x86_64)"
@@ -1245,7 +1245,7 @@ config CRYPTO_BLOWFISH_X86_64
designed for use on "large microprocessors".
See also:
- <http://www.schneier.com/blowfish.html>
+ <https://www.schneier.com/blowfish.html>
config CRYPTO_CAMELLIA
tristate "Camellia cipher algorithms"
@@ -1441,10 +1441,10 @@ config CRYPTO_SALSA20
Salsa20 stream cipher algorithm.
Salsa20 is a stream cipher submitted to eSTREAM, the ECRYPT
- Stream Cipher Project. See <http://www.ecrypt.eu.org/stream/>
+ Stream Cipher Project. See <https://www.ecrypt.eu.org/stream/>
The Salsa20 stream cipher algorithm is designed by Daniel J.
- Bernstein <djb@cr.yp.to>. See <http://cr.yp.to/snuffle.html>
+ Bernstein <djb@cr.yp.to>. See <https://cr.yp.to/snuffle.html>
config CRYPTO_CHACHA20
tristate "ChaCha stream cipher algorithms"
@@ -1456,7 +1456,7 @@ config CRYPTO_CHACHA20
ChaCha20 is a 256-bit high-speed stream cipher designed by Daniel J.
Bernstein and further specified in RFC7539 for use in IETF protocols.
This is the portable C implementation of ChaCha20. See also:
- <http://cr.yp.to/chacha/chacha-20080128.pdf>
+ <https://cr.yp.to/chacha/chacha-20080128.pdf>
XChaCha20 is the application of the XSalsa20 construction to ChaCha20
rather than to Salsa20. XChaCha20 extends ChaCha20's nonce length
@@ -1509,7 +1509,7 @@ config CRYPTO_SERPENT
variant of Serpent for compatibility with old kerneli.org code.
See also:
- <http://www.cl.cam.ac.uk/~rja14/serpent.html>
+ <https://www.cl.cam.ac.uk/~rja14/serpent.html>
config CRYPTO_SERPENT_SSE2_X86_64
tristate "Serpent cipher algorithm (x86_64/SSE2)"
@@ -1528,7 +1528,7 @@ config CRYPTO_SERPENT_SSE2_X86_64
blocks parallel using SSE2 instruction set.
See also:
- <http://www.cl.cam.ac.uk/~rja14/serpent.html>
+ <https://www.cl.cam.ac.uk/~rja14/serpent.html>
config CRYPTO_SERPENT_SSE2_586
tristate "Serpent cipher algorithm (i586/SSE2)"
@@ -1547,7 +1547,7 @@ config CRYPTO_SERPENT_SSE2_586
blocks parallel using SSE2 instruction set.
See also:
- <http://www.cl.cam.ac.uk/~rja14/serpent.html>
+ <https://www.cl.cam.ac.uk/~rja14/serpent.html>
config CRYPTO_SERPENT_AVX_X86_64
tristate "Serpent cipher algorithm (x86_64/AVX)"
@@ -1567,7 +1567,7 @@ config CRYPTO_SERPENT_AVX_X86_64
eight blocks parallel using the AVX instruction set.
See also:
- <http://www.cl.cam.ac.uk/~rja14/serpent.html>
+ <https://www.cl.cam.ac.uk/~rja14/serpent.html>
config CRYPTO_SERPENT_AVX2_X86_64
tristate "Serpent cipher algorithm (x86_64/AVX2)"
@@ -1583,7 +1583,7 @@ config CRYPTO_SERPENT_AVX2_X86_64
blocks parallel using AVX2 instruction set.
See also:
- <http://www.cl.cam.ac.uk/~rja14/serpent.html>
+ <https://www.cl.cam.ac.uk/~rja14/serpent.html>
config CRYPTO_SM4
tristate "SM4 cipher algorithm"
@@ -1640,7 +1640,7 @@ config CRYPTO_TWOFISH
bits.
See also:
- <http://www.schneier.com/twofish.html>
+ <https://www.schneier.com/twofish.html>
config CRYPTO_TWOFISH_COMMON
tristate
@@ -1662,7 +1662,7 @@ config CRYPTO_TWOFISH_586
bits.
See also:
- <http://www.schneier.com/twofish.html>
+ <https://www.schneier.com/twofish.html>
config CRYPTO_TWOFISH_X86_64
tristate "Twofish cipher algorithm (x86_64)"
@@ -1678,7 +1678,7 @@ config CRYPTO_TWOFISH_X86_64
bits.
See also:
- <http://www.schneier.com/twofish.html>
+ <https://www.schneier.com/twofish.html>
config CRYPTO_TWOFISH_X86_64_3WAY
tristate "Twofish cipher algorithm (x86_64, 3-way parallel)"
@@ -1699,7 +1699,7 @@ config CRYPTO_TWOFISH_X86_64_3WAY
blocks parallel, utilizing resources of out-of-order CPUs better.
See also:
- <http://www.schneier.com/twofish.html>
+ <https://www.schneier.com/twofish.html>
config CRYPTO_TWOFISH_AVX_X86_64
tristate "Twofish cipher algorithm (x86_64/AVX)"
@@ -1722,7 +1722,7 @@ config CRYPTO_TWOFISH_AVX_X86_64
eight blocks parallel using the AVX Instruction Set.
See also:
- <http://www.schneier.com/twofish.html>
+ <https://www.schneier.com/twofish.html>
comment "Compression"
diff --git a/crypto/acompress.c b/crypto/acompress.c
index 84a76723e851..c32c72048a1c 100644
--- a/crypto/acompress.c
+++ b/crypto/acompress.c
@@ -109,6 +109,14 @@ struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
}
EXPORT_SYMBOL_GPL(crypto_alloc_acomp);
+struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type,
+ u32 mask, int node)
+{
+ return crypto_alloc_tfm_node(alg_name, &crypto_acomp_type, type, mask,
+ node);
+}
+EXPORT_SYMBOL_GPL(crypto_alloc_acomp_node);
+
struct acomp_req *acomp_request_alloc(struct crypto_acomp *acomp)
{
struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
diff --git a/crypto/adiantum.c b/crypto/adiantum.c
index cf2b9f4103dd..7fbdc3270984 100644
--- a/crypto/adiantum.c
+++ b/crypto/adiantum.c
@@ -490,7 +490,6 @@ static bool adiantum_supported_algorithms(struct skcipher_alg *streamcipher_alg,
static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
{
- struct crypto_attr_type *algt;
u32 mask;
const char *nhpoly1305_name;
struct skcipher_instance *inst;
@@ -500,14 +499,9 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
struct shash_alg *hash_alg;
int err;
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return PTR_ERR(algt);
-
- if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
- return -EINVAL;
-
- mask = crypto_requires_sync(algt->type, algt->mask);
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
+ if (err)
+ return err;
inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL);
if (!inst)
@@ -565,8 +559,6 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
hash_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
- inst->alg.base.cra_flags = streamcipher_alg->base.cra_flags &
- CRYPTO_ALG_ASYNC;
inst->alg.base.cra_blocksize = BLOCKCIPHER_BLOCK_SIZE;
inst->alg.base.cra_ctxsize = sizeof(struct adiantum_tfm_ctx);
inst->alg.base.cra_alignmask = streamcipher_alg->base.cra_alignmask |
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 28fc323e3fe3..5882ed46f1ad 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -635,6 +635,7 @@ void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
if (!ctx->used)
ctx->merge = 0;
+ ctx->init = ctx->more;
}
EXPORT_SYMBOL_GPL(af_alg_pull_tsgl);
@@ -734,9 +735,10 @@ EXPORT_SYMBOL_GPL(af_alg_wmem_wakeup);
*
* @sk socket of connection to user space
* @flags If MSG_DONTWAIT is set, then only report if function would sleep
+ * @min Set to minimum request size if partial requests are allowed.
* @return 0 when writable memory is available, < 0 upon error
*/
-int af_alg_wait_for_data(struct sock *sk, unsigned flags)
+int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min)
{
DEFINE_WAIT_FUNC(wait, woken_wake_function);
struct alg_sock *ask = alg_sk(sk);
@@ -754,7 +756,9 @@ int af_alg_wait_for_data(struct sock *sk, unsigned flags)
if (signal_pending(current))
break;
timeout = MAX_SCHEDULE_TIMEOUT;
- if (sk_wait_event(sk, &timeout, (ctx->used || !ctx->more),
+ if (sk_wait_event(sk, &timeout,
+ ctx->init && (!ctx->more ||
+ (min && ctx->used >= min)),
&wait)) {
err = 0;
break;
@@ -843,10 +847,11 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
}
lock_sock(sk);
- if (!ctx->more && ctx->used) {
+ if (ctx->init && (init || !ctx->more)) {
err = -EINVAL;
goto unlock;
}
+ ctx->init = true;
if (init) {
ctx->enc = enc;
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 92abdf675992..fdabf2675b63 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -690,6 +690,8 @@ int crypto_grab_spawn(struct crypto_spawn *spawn, struct crypto_instance *inst,
spawn->mask = mask;
spawn->next = inst->spawns;
inst->spawns = spawn;
+ inst->alg.cra_flags |=
+ (alg->cra_flags & CRYPTO_ALG_INHERITED_FLAGS);
err = 0;
}
up_write(&crypto_alg_sem);
@@ -816,7 +818,23 @@ struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb)
}
EXPORT_SYMBOL_GPL(crypto_get_attr_type);
-int crypto_check_attr_type(struct rtattr **tb, u32 type)
+/**
+ * crypto_check_attr_type() - check algorithm type and compute inherited mask
+ * @tb: the template parameters
+ * @type: the algorithm type the template would be instantiated as
+ * @mask_ret: (output) the mask that should be passed to crypto_grab_*()
+ * to restrict the flags of any inner algorithms
+ *
+ * Validate that the algorithm type the user requested is compatible with the
+ * one the template would actually be instantiated as. E.g., if the user is
+ * doing crypto_alloc_shash("cbc(aes)", ...), this would return an error because
+ * the "cbc" template creates an "skcipher" algorithm, not an "shash" algorithm.
+ *
+ * Also compute the mask to use to restrict the flags of any inner algorithms.
+ *
+ * Return: 0 on success; -errno on failure
+ */
+int crypto_check_attr_type(struct rtattr **tb, u32 type, u32 *mask_ret)
{
struct crypto_attr_type *algt;
@@ -827,6 +845,7 @@ int crypto_check_attr_type(struct rtattr **tb, u32 type)
if ((algt->type ^ type) & algt->mask)
return -EINVAL;
+ *mask_ret = crypto_algt_inherited_mask(algt);
return 0;
}
EXPORT_SYMBOL_GPL(crypto_check_attr_type);
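The template conversions in the hunks below all repeat the same shape; a condensed sketch of a converted ->create() is shown here (hypothetical: example_create and the skcipher choice are illustrative, the individual calls mirror the converted code in this series):

#include <crypto/algapi.h>
#include <crypto/internal/skcipher.h>
#include <linux/slab.h>

static int example_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct skcipher_instance *inst;
	struct crypto_skcipher_spawn *spawn;
	u32 mask;
	int err;

	/* Replaces the old crypto_get_attr_type() + crypto_requires_sync()
	 * sequence: checks the requested type and computes the inherited-flags
	 * mask in one call. */
	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
	if (err)
		return err;

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;
	spawn = skcipher_instance_ctx(inst);

	/* Passing the mask lets crypto_grab_spawn() propagate
	 * CRYPTO_ALG_INHERITED_FLAGS, which is why the explicit cra_flags
	 * assignments are removed throughout this series. */
	err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst),
				   crypto_attr_alg_name(tb[1]), 0, mask);
	if (err)
		goto err_free_inst;

	/* ... driver-name construction and inst->alg setup elided ... */

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		crypto_drop_skcipher(spawn);
		kfree(inst);
	}
	return err;
}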
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 0ae000a61c7f..d48d2156e621 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -106,8 +106,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
size_t usedpages = 0; /* [in] RX bufs to be used from user */
size_t processed = 0; /* [in] TX bufs to be consumed */
- if (!ctx->used) {
- err = af_alg_wait_for_data(sk, flags);
+ if (!ctx->init || ctx->more) {
+ err = af_alg_wait_for_data(sk, flags, 0);
if (err)
return err;
}
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index ec5567c87a6d..a51ba22fef58 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -61,8 +61,8 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
int err = 0;
size_t len = 0;
- if (!ctx->used) {
- err = af_alg_wait_for_data(sk, flags);
+ if (!ctx->init || (ctx->more && ctx->used < bs)) {
+ err = af_alg_wait_for_data(sk, flags, bs);
if (err)
return err;
}
diff --git a/crypto/api.c b/crypto/api.c
index edcf690800d4..5d8fe60b36c1 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -433,8 +433,9 @@ err:
}
EXPORT_SYMBOL_GPL(crypto_alloc_base);
-void *crypto_create_tfm(struct crypto_alg *alg,
- const struct crypto_type *frontend)
+void *crypto_create_tfm_node(struct crypto_alg *alg,
+ const struct crypto_type *frontend,
+ int node)
{
char *mem;
struct crypto_tfm *tfm = NULL;
@@ -445,12 +446,13 @@ void *crypto_create_tfm(struct crypto_alg *alg,
tfmsize = frontend->tfmsize;
total = tfmsize + sizeof(*tfm) + frontend->extsize(alg);
- mem = kzalloc(total, GFP_KERNEL);
+ mem = kzalloc_node(total, GFP_KERNEL, node);
if (mem == NULL)
goto out_err;
tfm = (struct crypto_tfm *)(mem + tfmsize);
tfm->__crt_alg = alg;
+ tfm->node = node;
err = frontend->init_tfm(tfm);
if (err)
@@ -472,7 +474,7 @@ out_err:
out:
return mem;
}
-EXPORT_SYMBOL_GPL(crypto_create_tfm);
+EXPORT_SYMBOL_GPL(crypto_create_tfm_node);
struct crypto_alg *crypto_find_alg(const char *alg_name,
const struct crypto_type *frontend,
@@ -490,11 +492,13 @@ struct crypto_alg *crypto_find_alg(const char *alg_name,
EXPORT_SYMBOL_GPL(crypto_find_alg);
/*
- * crypto_alloc_tfm - Locate algorithm and allocate transform
+ * crypto_alloc_tfm_node - Locate algorithm and allocate transform
* @alg_name: Name of algorithm
* @frontend: Frontend algorithm type
* @type: Type of algorithm
* @mask: Mask for type comparison
+ * @node: NUMA node in which users desire to put requests, if node is
+ * NUMA_NO_NODE, it means users have no special requirement.
*
* crypto_alloc_tfm() will first attempt to locate an already loaded
* algorithm. If that fails and the kernel supports dynamically loadable
@@ -509,8 +513,10 @@ EXPORT_SYMBOL_GPL(crypto_find_alg);
*
* In case of error the return value is an error pointer.
*/
-void *crypto_alloc_tfm(const char *alg_name,
- const struct crypto_type *frontend, u32 type, u32 mask)
+
+void *crypto_alloc_tfm_node(const char *alg_name,
+ const struct crypto_type *frontend, u32 type, u32 mask,
+ int node)
{
void *tfm;
int err;
@@ -524,7 +530,7 @@ void *crypto_alloc_tfm(const char *alg_name,
goto err;
}
- tfm = crypto_create_tfm(alg, frontend);
+ tfm = crypto_create_tfm_node(alg, frontend, node);
if (!IS_ERR(tfm))
return tfm;
@@ -542,7 +548,7 @@ err:
return ERR_PTR(err);
}
-EXPORT_SYMBOL_GPL(crypto_alloc_tfm);
+EXPORT_SYMBOL_GPL(crypto_alloc_tfm_node);
/*
* crypto_destroy_tfm - Free crypto transform
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 775e7138fd10..670bf1a01d00 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -372,7 +372,6 @@ static void crypto_authenc_free(struct aead_instance *inst)
static int crypto_authenc_create(struct crypto_template *tmpl,
struct rtattr **tb)
{
- struct crypto_attr_type *algt;
u32 mask;
struct aead_instance *inst;
struct authenc_instance_ctx *ctx;
@@ -381,14 +380,9 @@ static int crypto_authenc_create(struct crypto_template *tmpl,
struct skcipher_alg *enc;
int err;
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return PTR_ERR(algt);
-
- if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
- return -EINVAL;
-
- mask = crypto_requires_sync(algt->type, algt->mask);
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask);
+ if (err)
+ return err;
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
@@ -423,8 +417,6 @@ static int crypto_authenc_create(struct crypto_template *tmpl,
enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
- inst->alg.base.cra_flags = (auth_base->cra_flags |
- enc->base.cra_flags) & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
auth_base->cra_priority;
inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index 149b70df2a91..b60e61b1904c 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -390,7 +390,6 @@ static void crypto_authenc_esn_free(struct aead_instance *inst)
static int crypto_authenc_esn_create(struct crypto_template *tmpl,
struct rtattr **tb)
{
- struct crypto_attr_type *algt;
u32 mask;
struct aead_instance *inst;
struct authenc_esn_instance_ctx *ctx;
@@ -399,14 +398,9 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl,
struct skcipher_alg *enc;
int err;
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return PTR_ERR(algt);
-
- if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
- return -EINVAL;
-
- mask = crypto_requires_sync(algt->type, algt->mask);
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask);
+ if (err)
+ return err;
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
@@ -437,8 +431,6 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl,
enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
- inst->alg.base.cra_flags = (auth_base->cra_flags |
- enc->base.cra_flags) & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
auth_base->cra_priority;
inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
diff --git a/crypto/blake2b_generic.c b/crypto/blake2b_generic.c
index 0ffd8d92e308..a2ffe60e06d3 100644
--- a/crypto/blake2b_generic.c
+++ b/crypto/blake2b_generic.c
@@ -8,7 +8,7 @@
*
* - CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0
* - OpenSSL license : https://www.openssl.org/source/license.html
- * - Apache 2.0 : http://www.apache.org/licenses/LICENSE-2.0
+ * - Apache 2.0 : https://www.apache.org/licenses/LICENSE-2.0
*
* More information about the BLAKE2 hash function can be found at
* https://blake2.net.
diff --git a/crypto/camellia_generic.c b/crypto/camellia_generic.c
index 9a5783e5196a..0b9f409f7370 100644
--- a/crypto/camellia_generic.c
+++ b/crypto/camellia_generic.c
@@ -6,7 +6,7 @@
/*
* Algorithm Specification
- * http://info.isl.ntt.co.jp/crypt/eng/camellia/specifications.html
+ * https://info.isl.ntt.co.jp/crypt/eng/camellia/specifications.html
*/
/*
diff --git a/crypto/ccm.c b/crypto/ccm.c
index d1fb01bbc814..494d70901186 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -447,7 +447,6 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
const char *ctr_name,
const char *mac_name)
{
- struct crypto_attr_type *algt;
u32 mask;
struct aead_instance *inst;
struct ccm_instance_ctx *ictx;
@@ -455,14 +454,9 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
struct hash_alg_common *mac;
int err;
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return PTR_ERR(algt);
-
- if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
- return -EINVAL;
-
- mask = crypto_requires_sync(algt->type, algt->mask);
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask);
+ if (err)
+ return err;
inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL);
if (!inst)
@@ -470,7 +464,7 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
ictx = aead_instance_ctx(inst);
err = crypto_grab_ahash(&ictx->mac, aead_crypto_instance(inst),
- mac_name, 0, CRYPTO_ALG_ASYNC);
+ mac_name, 0, mask | CRYPTO_ALG_ASYNC);
if (err)
goto err_free_inst;
mac = crypto_spawn_ahash_alg(&ictx->mac);
@@ -507,7 +501,6 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
mac->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
- inst->alg.base.cra_flags = ctr->base.cra_flags & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = (mac->base.cra_priority +
ctr->base.cra_priority) / 2;
inst->alg.base.cra_blocksize = 1;
@@ -712,21 +705,15 @@ static void crypto_rfc4309_free(struct aead_instance *inst)
static int crypto_rfc4309_create(struct crypto_template *tmpl,
struct rtattr **tb)
{
- struct crypto_attr_type *algt;
u32 mask;
struct aead_instance *inst;
struct crypto_aead_spawn *spawn;
struct aead_alg *alg;
int err;
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return PTR_ERR(algt);
-
- if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
- return -EINVAL;
-
- mask = crypto_requires_sync(algt->type, algt->mask);
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask);
+ if (err)
+ return err;
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
@@ -759,7 +746,6 @@ static int crypto_rfc4309_create(struct crypto_template *tmpl,
CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
- inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = alg->base.cra_priority;
inst->alg.base.cra_blocksize = 1;
inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
@@ -878,9 +864,10 @@ static int cbcmac_create(struct crypto_template *tmpl, struct rtattr **tb)
struct shash_instance *inst;
struct crypto_cipher_spawn *spawn;
struct crypto_alg *alg;
+ u32 mask;
int err;
- err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH);
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask);
if (err)
return err;
@@ -890,7 +877,7 @@ static int cbcmac_create(struct crypto_template *tmpl, struct rtattr **tb)
spawn = shash_instance_ctx(inst);
err = crypto_grab_cipher(spawn, shash_crypto_instance(inst),
- crypto_attr_alg_name(tb[1]), 0, 0);
+ crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;
alg = crypto_spawn_cipher_alg(spawn);
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
index ccaea5cb66d1..97bbb135e9a6 100644
--- a/crypto/chacha20poly1305.c
+++ b/crypto/chacha20poly1305.c
@@ -555,7 +555,6 @@ static void chachapoly_free(struct aead_instance *inst)
static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
const char *name, unsigned int ivsize)
{
- struct crypto_attr_type *algt;
u32 mask;
struct aead_instance *inst;
struct chachapoly_instance_ctx *ctx;
@@ -566,14 +565,9 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
if (ivsize > CHACHAPOLY_IV_SIZE)
return -EINVAL;
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return PTR_ERR(algt);
-
- if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
- return -EINVAL;
-
- mask = crypto_requires_sync(algt->type, algt->mask);
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask);
+ if (err)
+ return err;
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
@@ -613,8 +607,6 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
poly->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
- inst->alg.base.cra_flags = (chacha->base.cra_flags |
- poly->base.cra_flags) & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = (chacha->base.cra_priority +
poly->base.cra_priority) / 2;
inst->alg.base.cra_blocksize = 1;
diff --git a/crypto/cmac.c b/crypto/cmac.c
index 143a6544c873..df36be1efb81 100644
--- a/crypto/cmac.c
+++ b/crypto/cmac.c
@@ -225,9 +225,10 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb)
struct crypto_cipher_spawn *spawn;
struct crypto_alg *alg;
unsigned long alignmask;
+ u32 mask;
int err;
- err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH);
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask);
if (err)
return err;
@@ -237,7 +238,7 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb)
spawn = shash_instance_ctx(inst);
err = crypto_grab_cipher(spawn, shash_crypto_instance(inst),
- crypto_attr_alg_name(tb[1]), 0, 0);
+ crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;
alg = crypto_spawn_cipher_alg(spawn);
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 283212262adb..a1bea0f4baa8 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -191,17 +191,20 @@ static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
return ictx->queue;
}
-static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
- u32 *mask)
+static void cryptd_type_and_mask(struct crypto_attr_type *algt,
+ u32 *type, u32 *mask)
{
- struct crypto_attr_type *algt;
+ /*
+ * cryptd is allowed to wrap internal algorithms, but in that case the
+ * resulting cryptd instance will be marked as internal as well.
+ */
+ *type = algt->type & CRYPTO_ALG_INTERNAL;
+ *mask = algt->mask & CRYPTO_ALG_INTERNAL;
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return;
+ /* No point in cryptd wrapping an algorithm that's already async. */
+ *mask |= CRYPTO_ALG_ASYNC;
- *type |= algt->type & CRYPTO_ALG_INTERNAL;
- *mask |= algt->mask & CRYPTO_ALG_INTERNAL;
+ *mask |= crypto_algt_inherited_mask(algt);
}
static int cryptd_init_instance(struct crypto_instance *inst,
@@ -364,6 +367,7 @@ static void cryptd_skcipher_free(struct skcipher_instance *inst)
static int cryptd_create_skcipher(struct crypto_template *tmpl,
struct rtattr **tb,
+ struct crypto_attr_type *algt,
struct cryptd_queue *queue)
{
struct skcipherd_instance_ctx *ctx;
@@ -373,10 +377,7 @@ static int cryptd_create_skcipher(struct crypto_template *tmpl,
u32 mask;
int err;
- type = 0;
- mask = CRYPTO_ALG_ASYNC;
-
- cryptd_check_internal(tb, &type, &mask);
+ cryptd_type_and_mask(algt, &type, &mask);
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
@@ -395,9 +396,8 @@ static int cryptd_create_skcipher(struct crypto_template *tmpl,
if (err)
goto err_free_inst;
- inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
- (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
-
+ inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
+ (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
@@ -633,16 +633,17 @@ static void cryptd_hash_free(struct ahash_instance *inst)
}
static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
+ struct crypto_attr_type *algt,
struct cryptd_queue *queue)
{
struct hashd_instance_ctx *ctx;
struct ahash_instance *inst;
struct shash_alg *alg;
- u32 type = 0;
- u32 mask = 0;
+ u32 type;
+ u32 mask;
int err;
- cryptd_check_internal(tb, &type, &mask);
+ cryptd_type_and_mask(algt, &type, &mask);
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
@@ -661,10 +662,9 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
if (err)
goto err_free_inst;
- inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
- (alg->base.cra_flags & (CRYPTO_ALG_INTERNAL |
+ inst->alg.halg.base.cra_flags |= CRYPTO_ALG_ASYNC |
+ (alg->base.cra_flags & (CRYPTO_ALG_INTERNAL|
CRYPTO_ALG_OPTIONAL_KEY));
-
inst->alg.halg.digestsize = alg->digestsize;
inst->alg.halg.statesize = alg->statesize;
inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
@@ -820,16 +820,17 @@ static void cryptd_aead_free(struct aead_instance *inst)
static int cryptd_create_aead(struct crypto_template *tmpl,
struct rtattr **tb,
+ struct crypto_attr_type *algt,
struct cryptd_queue *queue)
{
struct aead_instance_ctx *ctx;
struct aead_instance *inst;
struct aead_alg *alg;
- u32 type = 0;
- u32 mask = CRYPTO_ALG_ASYNC;
+ u32 type;
+ u32 mask;
int err;
- cryptd_check_internal(tb, &type, &mask);
+ cryptd_type_and_mask(algt, &type, &mask);
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
@@ -848,8 +849,8 @@ static int cryptd_create_aead(struct crypto_template *tmpl,
if (err)
goto err_free_inst;
- inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
- (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
+ inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
+ (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);
inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
@@ -884,11 +885,11 @@ static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_SKCIPHER:
- return cryptd_create_skcipher(tmpl, tb, &queue);
+ return cryptd_create_skcipher(tmpl, tb, algt, &queue);
case CRYPTO_ALG_TYPE_HASH:
- return cryptd_create_hash(tmpl, tb, &queue);
+ return cryptd_create_hash(tmpl, tb, algt, &queue);
case CRYPTO_ALG_TYPE_AEAD:
- return cryptd_create_aead(tmpl, tb, &queue);
+ return cryptd_create_aead(tmpl, tb, algt, &queue);
}
return -EINVAL;
diff --git a/crypto/ctr.c b/crypto/ctr.c
index 31ac4ae598e1..c39fcffba27f 100644
--- a/crypto/ctr.c
+++ b/crypto/ctr.c
@@ -256,29 +256,20 @@ static void crypto_rfc3686_free(struct skcipher_instance *inst)
static int crypto_rfc3686_create(struct crypto_template *tmpl,
struct rtattr **tb)
{
- struct crypto_attr_type *algt;
struct skcipher_instance *inst;
struct skcipher_alg *alg;
struct crypto_skcipher_spawn *spawn;
u32 mask;
-
int err;
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return PTR_ERR(algt);
-
- if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
- return -EINVAL;
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
+ if (err)
+ return err;
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
return -ENOMEM;
- mask = crypto_requires_sync(algt->type, algt->mask) |
- crypto_requires_off(algt->type, algt->mask,
- CRYPTO_ALG_NEED_FALLBACK);
-
spawn = skcipher_instance_ctx(inst);
err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst),
@@ -310,8 +301,6 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl,
inst->alg.base.cra_blocksize = 1;
inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
- inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
-
inst->alg.ivsize = CTR_RFC3686_IV_SIZE;
inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +
diff --git a/crypto/cts.c b/crypto/cts.c
index 5e005c4f0221..3766d47ebcc0 100644
--- a/crypto/cts.c
+++ b/crypto/cts.c
@@ -325,19 +325,13 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct crypto_skcipher_spawn *spawn;
struct skcipher_instance *inst;
- struct crypto_attr_type *algt;
struct skcipher_alg *alg;
u32 mask;
int err;
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return PTR_ERR(algt);
-
- if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
- return -EINVAL;
-
- mask = crypto_requires_sync(algt->type, algt->mask);
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
+ if (err)
+ return err;
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
@@ -364,7 +358,6 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb)
if (err)
goto err_free_inst;
- inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = alg->base.cra_priority;
inst->alg.base.cra_blocksize = alg->base.cra_blocksize;
inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
diff --git a/crypto/dh.c b/crypto/dh.c
index 566f624a2de2..cd4f32092e5c 100644
--- a/crypto/dh.c
+++ b/crypto/dh.c
@@ -9,6 +9,7 @@
#include <crypto/internal/kpp.h>
#include <crypto/kpp.h>
#include <crypto/dh.h>
+#include <linux/fips.h>
#include <linux/mpi.h>
struct dh_ctx {
@@ -179,6 +180,43 @@ static int dh_compute_value(struct kpp_request *req)
if (ret)
goto err_free_base;
+ if (fips_enabled) {
+ /* SP800-56A rev3 5.7.1.1 check: Validation of shared secret */
+ if (req->src) {
+ MPI pone;
+
+ /* z <= 1 */
+ if (mpi_cmp_ui(val, 1) < 1) {
+ ret = -EBADMSG;
+ goto err_free_base;
+ }
+
+ /* z == p - 1 */
+ pone = mpi_alloc(0);
+
+ if (!pone) {
+ ret = -ENOMEM;
+ goto err_free_base;
+ }
+
+ ret = mpi_sub_ui(pone, ctx->p, 1);
+ if (!ret && !mpi_cmp(pone, val))
+ ret = -EBADMSG;
+
+ mpi_free(pone);
+
+ if (ret)
+ goto err_free_base;
+
+ /* SP800-56A rev 3 5.6.2.1.3 key check */
+ } else {
+ if (dh_is_pubkey_valid(ctx, val)) {
+ ret = -EAGAIN;
+ goto err_free_val;
+ }
+ }
+ }
+
ret = mpi_write_to_sgl(val, req->dst, req->dst_len, &sign);
if (ret)
goto err_free_base;
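Restating the fips_enabled check added above in one line (not part of the patch): the computed shared secret z is only written out when

	1 < z < p - 1

i.e. z <= 1 and z == p - 1 are rejected with -EBADMSG; on the public-key generation path (no req->src) the new dh_is_pubkey_valid() check applies instead and failures return -EAGAIN.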
diff --git a/crypto/ecc.c b/crypto/ecc.c
index 02d35be7702b..8acf8433ca29 100644
--- a/crypto/ecc.c
+++ b/crypto/ecc.c
@@ -940,7 +940,7 @@ static bool ecc_point_is_zero(const struct ecc_point *point)
}
/* Point multiplication algorithm using Montgomery's ladder with co-Z
- * coordinates. From http://eprint.iacr.org/2011/338.pdf
+ * coordinates. From https://eprint.iacr.org/2011/338.pdf
*/
/* Double in place */
@@ -1404,7 +1404,9 @@ int ecc_make_pub_key(unsigned int curve_id, unsigned int ndigits,
}
ecc_point_mult(pk, &curve->g, priv, NULL, curve, ndigits);
- if (ecc_point_is_zero(pk)) {
+
+ /* SP800-56A rev 3 5.6.2.1.3 key check */
+ if (ecc_is_pubkey_valid_full(curve, pk)) {
ret = -EAGAIN;
goto err_free_point;
}
@@ -1452,6 +1454,33 @@ int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
}
EXPORT_SYMBOL(ecc_is_pubkey_valid_partial);
+/* SP800-56A section 5.6.2.3.3 full verification */
+int ecc_is_pubkey_valid_full(const struct ecc_curve *curve,
+ struct ecc_point *pk)
+{
+ struct ecc_point *nQ;
+
+ /* Checks 1 through 3 */
+ int ret = ecc_is_pubkey_valid_partial(curve, pk);
+
+ if (ret)
+ return ret;
+
+ /* Check 4: Verify that nQ is the zero point. */
+ nQ = ecc_alloc_point(pk->ndigits);
+ if (!nQ)
+ return -ENOMEM;
+
+ ecc_point_mult(nQ, pk, curve->n, NULL, curve, pk->ndigits);
+ if (!ecc_point_is_zero(nQ))
+ ret = -EINVAL;
+
+ ecc_free_point(nQ);
+
+ return ret;
+}
+EXPORT_SYMBOL(ecc_is_pubkey_valid_full);
+
int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
const u64 *private_key, const u64 *public_key,
u64 *secret)
@@ -1495,11 +1524,16 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
ecc_point_mult(product, pk, priv, rand_z, curve, ndigits);
- ecc_swap_digits(product->x, secret, ndigits);
-
- if (ecc_point_is_zero(product))
+ if (ecc_point_is_zero(product)) {
ret = -EFAULT;
+ goto err_validity;
+ }
+
+ ecc_swap_digits(product->x, secret, ndigits);
+err_validity:
+ memzero_explicit(priv, sizeof(priv));
+ memzero_explicit(rand_z, sizeof(rand_z));
ecc_free_point(product);
err_alloc_product:
ecc_free_point(pk);
diff --git a/crypto/ecc.h b/crypto/ecc.h
index ab0eb70b9c09..d4e546b9ad79 100644
--- a/crypto/ecc.h
+++ b/crypto/ecc.h
@@ -148,6 +148,20 @@ int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
struct ecc_point *pk);
/**
+ * ecc_is_pubkey_valid_full() - Full public key validation
+ *
+ * @curve: elliptic curve domain parameters
+ * @pk: public key as a point
+ *
+ * Validate public key according to SP800-56A section 5.6.2.3.3 ECC Full
+ * Public-Key Validation Routine.
+ *
+ * Return: 0 if validation is successful, -EINVAL if validation fails.
+ */
+int ecc_is_pubkey_valid_full(const struct ecc_curve *curve,
+ struct ecc_point *pk);
+
+/**
* vli_is_zero() - Determine if vli is zero
*
* @vli: vli to check.
diff --git a/crypto/echainiv.c b/crypto/echainiv.c
index 4a2f02baba14..69686668625e 100644
--- a/crypto/echainiv.c
+++ b/crypto/echainiv.c
@@ -115,7 +115,7 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
struct aead_instance *inst;
int err;
- inst = aead_geniv_alloc(tmpl, tb, 0, 0);
+ inst = aead_geniv_alloc(tmpl, tb);
if (IS_ERR(inst))
return PTR_ERR(inst);
diff --git a/crypto/essiv.c b/crypto/essiv.c
index a7f45dbc4ee2..d012be23d496 100644
--- a/crypto/essiv.c
+++ b/crypto/essiv.c
@@ -466,7 +466,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb)
return PTR_ERR(shash_name);
type = algt->type & algt->mask;
- mask = crypto_requires_sync(algt->type, algt->mask);
+ mask = crypto_algt_inherited_mask(algt);
switch (type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
@@ -525,7 +525,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb)
/* Synchronous hash, e.g., "sha256" */
_hash_alg = crypto_alg_mod_lookup(shash_name,
CRYPTO_ALG_TYPE_SHASH,
- CRYPTO_ALG_TYPE_MASK);
+ CRYPTO_ALG_TYPE_MASK | mask);
if (IS_ERR(_hash_alg)) {
err = PTR_ERR(_hash_alg);
goto out_drop_skcipher;
@@ -557,7 +557,12 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb)
hash_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto out_free_hash;
- base->cra_flags = block_base->cra_flags & CRYPTO_ALG_ASYNC;
+ /*
+ * hash_alg wasn't gotten via crypto_grab*(), so we need to inherit its
+ * flags manually.
+ */
+ base->cra_flags |= (hash_alg->base.cra_flags &
+ CRYPTO_ALG_INHERITED_FLAGS);
base->cra_blocksize = block_base->cra_blocksize;
base->cra_ctxsize = sizeof(struct essiv_tfm_ctx);
base->cra_alignmask = block_base->cra_alignmask;
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 0103d28c541e..3a36a9533c96 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -578,7 +578,6 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
const char *ctr_name,
const char *ghash_name)
{
- struct crypto_attr_type *algt;
u32 mask;
struct aead_instance *inst;
struct gcm_instance_ctx *ctx;
@@ -586,14 +585,9 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
struct hash_alg_common *ghash;
int err;
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return PTR_ERR(algt);
-
- if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
- return -EINVAL;
-
- mask = crypto_requires_sync(algt->type, algt->mask);
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask);
+ if (err)
+ return err;
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
@@ -635,8 +629,6 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
- inst->alg.base.cra_flags = (ghash->base.cra_flags |
- ctr->base.cra_flags) & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = (ghash->base.cra_priority +
ctr->base.cra_priority) / 2;
inst->alg.base.cra_blocksize = 1;
@@ -835,21 +827,15 @@ static void crypto_rfc4106_free(struct aead_instance *inst)
static int crypto_rfc4106_create(struct crypto_template *tmpl,
struct rtattr **tb)
{
- struct crypto_attr_type *algt;
u32 mask;
struct aead_instance *inst;
struct crypto_aead_spawn *spawn;
struct aead_alg *alg;
int err;
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return PTR_ERR(algt);
-
- if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
- return -EINVAL;
-
- mask = crypto_requires_sync(algt->type, algt->mask);
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask);
+ if (err)
+ return err;
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
@@ -882,7 +868,6 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl,
CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
- inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = alg->base.cra_priority;
inst->alg.base.cra_blocksize = 1;
inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
@@ -1057,21 +1042,15 @@ static void crypto_rfc4543_free(struct aead_instance *inst)
static int crypto_rfc4543_create(struct crypto_template *tmpl,
struct rtattr **tb)
{
- struct crypto_attr_type *algt;
u32 mask;
struct aead_instance *inst;
struct aead_alg *alg;
struct crypto_rfc4543_instance_ctx *ctx;
int err;
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return PTR_ERR(algt);
-
- if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
- return -EINVAL;
-
- mask = crypto_requires_sync(algt->type, algt->mask);
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask);
+ if (err)
+ return err;
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
@@ -1104,7 +1083,6 @@ static int crypto_rfc4543_create(struct crypto_template *tmpl,
CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
- inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = alg->base.cra_priority;
inst->alg.base.cra_blocksize = 1;
inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
diff --git a/crypto/geniv.c b/crypto/geniv.c
index 6a90c52d49ad..bee4621b4f12 100644
--- a/crypto/geniv.c
+++ b/crypto/geniv.c
@@ -39,22 +39,19 @@ static void aead_geniv_free(struct aead_instance *inst)
}
struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
- struct rtattr **tb, u32 type, u32 mask)
+ struct rtattr **tb)
{
struct crypto_aead_spawn *spawn;
- struct crypto_attr_type *algt;
struct aead_instance *inst;
struct aead_alg *alg;
unsigned int ivsize;
unsigned int maxauthsize;
+ u32 mask;
int err;
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return ERR_CAST(algt);
-
- if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
- return ERR_PTR(-EINVAL);
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask);
+ if (err)
+ return ERR_PTR(err);
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
@@ -62,11 +59,8 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
spawn = aead_instance_ctx(inst);
- /* Ignore async algorithms if necessary. */
- mask |= crypto_requires_sync(algt->type, algt->mask);
-
err = crypto_grab_aead(spawn, aead_crypto_instance(inst),
- crypto_attr_alg_name(tb[1]), type, mask);
+ crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;
@@ -89,7 +83,6 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
- inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = alg->base.cra_priority;
inst->alg.base.cra_blocksize = alg->base.cra_blocksize;
inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
diff --git a/crypto/hmac.c b/crypto/hmac.c
index e38bfb948278..25856aa7ccbf 100644
--- a/crypto/hmac.c
+++ b/crypto/hmac.c
@@ -168,11 +168,12 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
struct crypto_shash_spawn *spawn;
struct crypto_alg *alg;
struct shash_alg *salg;
+ u32 mask;
int err;
int ds;
int ss;
- err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH);
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask);
if (err)
return err;
@@ -182,7 +183,7 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
spawn = shash_instance_ctx(inst);
err = crypto_grab_shash(spawn, shash_crypto_instance(inst),
- crypto_attr_alg_name(tb[1]), 0, 0);
+ crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;
salg = crypto_spawn_shash_alg(spawn);
diff --git a/crypto/internal.h b/crypto/internal.h
index ff06a3bd1ca1..1b92a5a61852 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -68,13 +68,28 @@ void crypto_remove_final(struct list_head *list);
void crypto_shoot_alg(struct crypto_alg *alg);
struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
u32 mask);
-void *crypto_create_tfm(struct crypto_alg *alg,
- const struct crypto_type *frontend);
+void *crypto_create_tfm_node(struct crypto_alg *alg,
+ const struct crypto_type *frontend, int node);
+
+static inline void *crypto_create_tfm(struct crypto_alg *alg,
+ const struct crypto_type *frontend)
+{
+ return crypto_create_tfm_node(alg, frontend, NUMA_NO_NODE);
+}
+
struct crypto_alg *crypto_find_alg(const char *alg_name,
const struct crypto_type *frontend,
u32 type, u32 mask);
-void *crypto_alloc_tfm(const char *alg_name,
- const struct crypto_type *frontend, u32 type, u32 mask);
+
+void *crypto_alloc_tfm_node(const char *alg_name,
+ const struct crypto_type *frontend, u32 type, u32 mask,
+ int node);
+
+static inline void *crypto_alloc_tfm(const char *alg_name,
+ const struct crypto_type *frontend, u32 type, u32 mask)
+{
+ return crypto_alloc_tfm_node(alg_name, frontend, type, mask, NUMA_NO_NODE);
+}
int crypto_probing_notify(unsigned long val, void *v);
diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c
index 57f4a1ac738b..6e147c43fc18 100644
--- a/crypto/jitterentropy.c
+++ b/crypto/jitterentropy.c
@@ -7,7 +7,7 @@
* Design
* ======
*
- * See http://www.chronox.de/jent.html
+ * See https://www.chronox.de/jent.html
*
* License
* =======
@@ -47,7 +47,7 @@
/*
* This Jitterentropy RNG is based on the jitterentropy library
- * version 2.2.0 provided at http://www.chronox.de/jent.html
+ * version 2.2.0 provided at https://www.chronox.de/jent.html
*/
#ifdef __OPTIMIZE__
diff --git a/crypto/lrw.c b/crypto/lrw.c
index 5b07a7c09296..bcf09fbc750a 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -9,7 +9,7 @@
*/
/* This implementation is checked against the test vectors in the above
* document and by a test vector provided by Ken Buchanan at
- * http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html
+ * https://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html
*
* The test vectors are included in the testing module tcrypt.[ch] */
@@ -27,7 +27,7 @@
#define LRW_BLOCK_SIZE 16
-struct priv {
+struct lrw_tfm_ctx {
struct crypto_skcipher *child;
/*
@@ -49,12 +49,12 @@ struct priv {
be128 mulinc[128];
};
-struct rctx {
+struct lrw_request_ctx {
be128 t;
struct skcipher_request subreq;
};
-static inline void setbit128_bbe(void *b, int bit)
+static inline void lrw_setbit128_bbe(void *b, int bit)
{
__set_bit(bit ^ (0x80 -
#ifdef __BIG_ENDIAN
@@ -65,10 +65,10 @@ static inline void setbit128_bbe(void *b, int bit)
), b);
}
-static int setkey(struct crypto_skcipher *parent, const u8 *key,
- unsigned int keylen)
+static int lrw_setkey(struct crypto_skcipher *parent, const u8 *key,
+ unsigned int keylen)
{
- struct priv *ctx = crypto_skcipher_ctx(parent);
+ struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(parent);
struct crypto_skcipher *child = ctx->child;
int err, bsize = LRW_BLOCK_SIZE;
const u8 *tweak = key + keylen - bsize;
@@ -92,7 +92,7 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key,
/* initialize optimization table */
for (i = 0; i < 128; i++) {
- setbit128_bbe(&tmp, i);
+ lrw_setbit128_bbe(&tmp, i);
ctx->mulinc[i] = tmp;
gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table);
}
@@ -108,10 +108,10 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key,
* For example:
*
* u32 counter[4] = { 0xFFFFFFFF, 0x1, 0x0, 0x0 };
- * int i = next_index(&counter);
+ * int i = lrw_next_index(&counter);
* // i == 33, counter == { 0x0, 0x2, 0x0, 0x0 }
*/
-static int next_index(u32 *counter)
+static int lrw_next_index(u32 *counter)
{
int i, res = 0;
@@ -135,14 +135,14 @@ static int next_index(u32 *counter)
* We compute the tweak masks twice (both before and after the ECB encryption or
* decryption) to avoid having to allocate a temporary buffer and/or make
* multiple calls to the 'ecb(..)' instance, which usually would be slower than
- * just doing the next_index() calls again.
+ * just doing the lrw_next_index() calls again.
*/
-static int xor_tweak(struct skcipher_request *req, bool second_pass)
+static int lrw_xor_tweak(struct skcipher_request *req, bool second_pass)
{
const int bs = LRW_BLOCK_SIZE;
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct priv *ctx = crypto_skcipher_ctx(tfm);
- struct rctx *rctx = skcipher_request_ctx(req);
+ const struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
be128 t = rctx->t;
struct skcipher_walk w;
__be32 *iv;
@@ -178,7 +178,8 @@ static int xor_tweak(struct skcipher_request *req, bool second_pass)
/* T <- I*Key2, using the optimization
* discussed in the specification */
- be128_xor(&t, &t, &ctx->mulinc[next_index(counter)]);
+ be128_xor(&t, &t,
+ &ctx->mulinc[lrw_next_index(counter)]);
} while ((avail -= bs) >= bs);
if (second_pass && w.nbytes == w.total) {
@@ -194,38 +195,40 @@ static int xor_tweak(struct skcipher_request *req, bool second_pass)
return err;
}
-static int xor_tweak_pre(struct skcipher_request *req)
+static int lrw_xor_tweak_pre(struct skcipher_request *req)
{
- return xor_tweak(req, false);
+ return lrw_xor_tweak(req, false);
}
-static int xor_tweak_post(struct skcipher_request *req)
+static int lrw_xor_tweak_post(struct skcipher_request *req)
{
- return xor_tweak(req, true);
+ return lrw_xor_tweak(req, true);
}
-static void crypt_done(struct crypto_async_request *areq, int err)
+static void lrw_crypt_done(struct crypto_async_request *areq, int err)
{
struct skcipher_request *req = areq->data;
if (!err) {
- struct rctx *rctx = skcipher_request_ctx(req);
+ struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- err = xor_tweak_post(req);
+ err = lrw_xor_tweak_post(req);
}
skcipher_request_complete(req, err);
}
-static void init_crypt(struct skcipher_request *req)
+static void lrw_init_crypt(struct skcipher_request *req)
{
- struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
- struct rctx *rctx = skcipher_request_ctx(req);
+ const struct lrw_tfm_ctx *ctx =
+ crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+ struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
struct skcipher_request *subreq = &rctx->subreq;
skcipher_request_set_tfm(subreq, ctx->child);
- skcipher_request_set_callback(subreq, req->base.flags, crypt_done, req);
+ skcipher_request_set_callback(subreq, req->base.flags, lrw_crypt_done,
+ req);
/* pass req->iv as IV (will be used by xor_tweak, ECB will ignore it) */
skcipher_request_set_crypt(subreq, req->dst, req->dst,
req->cryptlen, req->iv);
@@ -237,33 +240,33 @@ static void init_crypt(struct skcipher_request *req)
gf128mul_64k_bbe(&rctx->t, ctx->table);
}
-static int encrypt(struct skcipher_request *req)
+static int lrw_encrypt(struct skcipher_request *req)
{
- struct rctx *rctx = skcipher_request_ctx(req);
+ struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
struct skcipher_request *subreq = &rctx->subreq;
- init_crypt(req);
- return xor_tweak_pre(req) ?:
+ lrw_init_crypt(req);
+ return lrw_xor_tweak_pre(req) ?:
crypto_skcipher_encrypt(subreq) ?:
- xor_tweak_post(req);
+ lrw_xor_tweak_post(req);
}
-static int decrypt(struct skcipher_request *req)
+static int lrw_decrypt(struct skcipher_request *req)
{
- struct rctx *rctx = skcipher_request_ctx(req);
+ struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
struct skcipher_request *subreq = &rctx->subreq;
- init_crypt(req);
- return xor_tweak_pre(req) ?:
+ lrw_init_crypt(req);
+ return lrw_xor_tweak_pre(req) ?:
crypto_skcipher_decrypt(subreq) ?:
- xor_tweak_post(req);
+ lrw_xor_tweak_post(req);
}
-static int init_tfm(struct crypto_skcipher *tfm)
+static int lrw_init_tfm(struct crypto_skcipher *tfm)
{
struct skcipher_instance *inst = skcipher_alg_instance(tfm);
struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
- struct priv *ctx = crypto_skcipher_ctx(tfm);
+ struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
struct crypto_skcipher *cipher;
cipher = crypto_spawn_skcipher(spawn);
@@ -273,45 +276,39 @@ static int init_tfm(struct crypto_skcipher *tfm)
ctx->child = cipher;
crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(cipher) +
- sizeof(struct rctx));
+ sizeof(struct lrw_request_ctx));
return 0;
}
-static void exit_tfm(struct crypto_skcipher *tfm)
+static void lrw_exit_tfm(struct crypto_skcipher *tfm)
{
- struct priv *ctx = crypto_skcipher_ctx(tfm);
+ struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
if (ctx->table)
gf128mul_free_64k(ctx->table);
crypto_free_skcipher(ctx->child);
}
-static void crypto_lrw_free(struct skcipher_instance *inst)
+static void lrw_free_instance(struct skcipher_instance *inst)
{
crypto_drop_skcipher(skcipher_instance_ctx(inst));
kfree(inst);
}
-static int create(struct crypto_template *tmpl, struct rtattr **tb)
+static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct crypto_skcipher_spawn *spawn;
struct skcipher_instance *inst;
- struct crypto_attr_type *algt;
struct skcipher_alg *alg;
const char *cipher_name;
char ecb_name[CRYPTO_MAX_ALG_NAME];
u32 mask;
int err;
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return PTR_ERR(algt);
-
- if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
- return -EINVAL;
-
- mask = crypto_requires_sync(algt->type, algt->mask);
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
+ if (err)
+ return err;
cipher_name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(cipher_name))
@@ -379,7 +376,6 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
} else
goto err_free_inst;
- inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = alg->base.cra_priority;
inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE;
inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
@@ -391,43 +387,43 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) +
LRW_BLOCK_SIZE;
- inst->alg.base.cra_ctxsize = sizeof(struct priv);
+ inst->alg.base.cra_ctxsize = sizeof(struct lrw_tfm_ctx);
- inst->alg.init = init_tfm;
- inst->alg.exit = exit_tfm;
+ inst->alg.init = lrw_init_tfm;
+ inst->alg.exit = lrw_exit_tfm;
- inst->alg.setkey = setkey;
- inst->alg.encrypt = encrypt;
- inst->alg.decrypt = decrypt;
+ inst->alg.setkey = lrw_setkey;
+ inst->alg.encrypt = lrw_encrypt;
+ inst->alg.decrypt = lrw_decrypt;
- inst->free = crypto_lrw_free;
+ inst->free = lrw_free_instance;
err = skcipher_register_instance(tmpl, inst);
if (err) {
err_free_inst:
- crypto_lrw_free(inst);
+ lrw_free_instance(inst);
}
return err;
}
-static struct crypto_template crypto_tmpl = {
+static struct crypto_template lrw_tmpl = {
.name = "lrw",
- .create = create,
+ .create = lrw_create,
.module = THIS_MODULE,
};
-static int __init crypto_module_init(void)
+static int __init lrw_module_init(void)
{
- return crypto_register_template(&crypto_tmpl);
+ return crypto_register_template(&lrw_tmpl);
}
-static void __exit crypto_module_exit(void)
+static void __exit lrw_module_exit(void)
{
- crypto_unregister_template(&crypto_tmpl);
+ crypto_unregister_template(&lrw_tmpl);
}
-subsys_initcall(crypto_module_init);
-module_exit(crypto_module_exit);
+subsys_initcall(lrw_module_init);
+module_exit(lrw_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LRW block cipher mode");
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index 8bddc65cd509..d569c7ed6c80 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -226,18 +226,14 @@ static int pcrypt_init_instance(struct crypto_instance *inst,
}
static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
- u32 type, u32 mask)
+ struct crypto_attr_type *algt)
{
struct pcrypt_instance_ctx *ctx;
- struct crypto_attr_type *algt;
struct aead_instance *inst;
struct aead_alg *alg;
+ u32 mask = crypto_algt_inherited_mask(algt);
int err;
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return PTR_ERR(algt);
-
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
return -ENOMEM;
@@ -254,7 +250,7 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
goto err_free_inst;
err = crypto_grab_aead(&ctx->spawn, aead_crypto_instance(inst),
- crypto_attr_alg_name(tb[1]), 0, 0);
+ crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;
@@ -263,7 +259,7 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
if (err)
goto err_free_inst;
- inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC;
+ inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC;
inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
@@ -298,7 +294,7 @@ static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb)
switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_AEAD:
- return pcrypt_create_aead(tmpl, tb, algt->type, algt->mask);
+ return pcrypt_create_aead(tmpl, tb, algt);
}
return -EINVAL;
@@ -320,7 +316,7 @@ static int pcrypt_init_padata(struct padata_instance **pinst, const char *name)
{
int ret = -ENOMEM;
- *pinst = padata_alloc_possible(name);
+ *pinst = padata_alloc(name);
if (!*pinst)
return ret;
@@ -331,12 +327,6 @@ static int pcrypt_init_padata(struct padata_instance **pinst, const char *name)
return ret;
}
-static void pcrypt_fini_padata(struct padata_instance *pinst)
-{
- padata_stop(pinst);
- padata_free(pinst);
-}
-
static struct crypto_template pcrypt_tmpl = {
.name = "pcrypt",
.create = pcrypt_create,
@@ -359,13 +349,10 @@ static int __init pcrypt_init(void)
if (err)
goto err_deinit_pencrypt;
- padata_start(pencrypt);
- padata_start(pdecrypt);
-
return crypto_register_template(&pcrypt_tmpl);
err_deinit_pencrypt:
- pcrypt_fini_padata(pencrypt);
+ padata_free(pencrypt);
err_unreg_kset:
kset_unregister(pcrypt_kset);
err:
@@ -376,8 +363,8 @@ static void __exit pcrypt_exit(void)
{
crypto_unregister_template(&pcrypt_tmpl);
- pcrypt_fini_padata(pencrypt);
- pcrypt_fini_padata(pdecrypt);
+ padata_free(pencrypt);
+ padata_free(pdecrypt);
kset_unregister(pcrypt_kset);
}
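The pcrypt hunks above also carry the padata API change from this series: padata_alloc_possible() becomes padata_alloc(), and the padata_start()/padata_stop() calls (and the pcrypt_fini_padata() wrapper around them) disappear, so an instance is usable as soon as it is allocated and is torn down with a single padata_free(). A hedged sketch of the reduced lifecycle, using only the calls visible in the diff; the names are illustrative:

#include <linux/module.h>
#include <linux/padata.h>

static struct padata_instance *example_pinst;

static int __init example_init(void)
{
	/* No separate padata_start() step any more: the instance returned
	 * by padata_alloc() is immediately ready for use. */
	example_pinst = padata_alloc("example");
	if (!example_pinst)
		return -ENOMEM;
	return 0;
}

static void __exit example_exit(void)
{
	/* Teardown is likewise a single call; padata_stop() is gone. */
	padata_free(example_pinst);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");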
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
index d31031de51bc..4983b2b4a223 100644
--- a/crypto/rsa-pkcs1pad.c
+++ b/crypto/rsa-pkcs1pad.c
@@ -596,7 +596,6 @@ static void pkcs1pad_free(struct akcipher_instance *inst)
static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
{
- struct crypto_attr_type *algt;
u32 mask;
struct akcipher_instance *inst;
struct pkcs1pad_inst_ctx *ctx;
@@ -604,14 +603,9 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
const char *hash_name;
int err;
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return PTR_ERR(algt);
-
- if ((algt->type ^ CRYPTO_ALG_TYPE_AKCIPHER) & algt->mask)
- return -EINVAL;
-
- mask = crypto_requires_sync(algt->type, algt->mask);
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AKCIPHER, &mask);
+ if (err)
+ return err;
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
@@ -658,7 +652,6 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
goto err_free_inst;
}
- inst->alg.base.cra_flags = rsa_alg->base.cra_flags & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = rsa_alg->base.cra_priority;
inst->alg.base.cra_ctxsize = sizeof(struct pkcs1pad_ctx);
diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c
index c81a44404086..3418869dabef 100644
--- a/crypto/salsa20_generic.c
+++ b/crypto/salsa20_generic.c
@@ -9,8 +9,8 @@
* Salsa20 is a stream cipher candidate in eSTREAM, the ECRYPT Stream
* Cipher Project. It is designed by Daniel J. Bernstein <djb@cr.yp.to>.
* More information about eSTREAM and Salsa20 can be found here:
- * http://www.ecrypt.eu.org/stream/
- * http://cr.yp.to/snuffle.html
+ * https://www.ecrypt.eu.org/stream/
+ * https://cr.yp.to/snuffle.html
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
diff --git a/crypto/seqiv.c b/crypto/seqiv.c
index f124b9b54e15..23e22d8b63e6 100644
--- a/crypto/seqiv.c
+++ b/crypto/seqiv.c
@@ -138,7 +138,7 @@ static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb)
struct aead_instance *inst;
int err;
- inst = aead_geniv_alloc(tmpl, tb, 0, 0);
+ inst = aead_geniv_alloc(tmpl, tb);
if (IS_ERR(inst))
return PTR_ERR(inst);
@@ -164,23 +164,9 @@ free_inst:
return err;
}
-static int seqiv_create(struct crypto_template *tmpl, struct rtattr **tb)
-{
- struct crypto_attr_type *algt;
-
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return PTR_ERR(algt);
-
- if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK)
- return -EINVAL;
-
- return seqiv_aead_create(tmpl, tb);
-}
-
static struct crypto_template seqiv_tmpl = {
.name = "seqiv",
- .create = seqiv_create,
+ .create = seqiv_aead_create,
.module = THIS_MODULE,
};
diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c
index 44e263e25599..3e4069935b53 100644
--- a/crypto/sha3_generic.c
+++ b/crypto/sha3_generic.c
@@ -3,7 +3,7 @@
* Cryptographic API.
*
* SHA-3, as specified in
- * http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf
+ * https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf
*
* SHA-3 code by Jeff Garzik <jeff@garzik.org>
* Ard Biesheuvel <ard.biesheuvel@linaro.org>
diff --git a/crypto/simd.c b/crypto/simd.c
index 56885af49c24..edaa479a1ec5 100644
--- a/crypto/simd.c
+++ b/crypto/simd.c
@@ -171,7 +171,8 @@ struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname,
drvname) >= CRYPTO_MAX_ALG_NAME)
goto out_free_salg;
- alg->base.cra_flags = CRYPTO_ALG_ASYNC;
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC |
+ (ialg->base.cra_flags & CRYPTO_ALG_INHERITED_FLAGS);
alg->base.cra_priority = ialg->base.cra_priority;
alg->base.cra_blocksize = ialg->base.cra_blocksize;
alg->base.cra_alignmask = ialg->base.cra_alignmask;
@@ -417,7 +418,8 @@ struct simd_aead_alg *simd_aead_create_compat(const char *algname,
drvname) >= CRYPTO_MAX_ALG_NAME)
goto out_free_salg;
- alg->base.cra_flags = CRYPTO_ALG_ASYNC;
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC |
+ (ialg->base.cra_flags & CRYPTO_ALG_INHERITED_FLAGS);
alg->base.cra_priority = ialg->base.cra_priority;
alg->base.cra_blocksize = ialg->base.cra_blocksize;
alg->base.cra_alignmask = ialg->base.cra_alignmask;
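The simd.c hunks show the other half of the flag-inheritance rework: rather than hard-coding cra_flags to CRYPTO_ALG_ASYNC, the SIMD wrapper now also carries over whichever inheritable flags the wrapped algorithm set, and the pcrypt hunk above makes the same move by OR-ing CRYPTO_ALG_ASYNC into cra_flags instead of overwriting it. A small illustrative helper (not part of the patch) that captures the intent, assuming CRYPTO_ALG_INHERITED_FLAGS is the mask introduced elsewhere in this series, covering flags such as CRYPTO_ALG_ALLOCATES_MEMORY that callers filter on:

#include <linux/crypto.h>

/* Illustrative only: compute the flags a wrapper algorithm should expose,
 * given the algorithm it wraps.  The wrapper itself is asynchronous, but
 * properties like "allocates memory" must remain visible to users that
 * select algorithms by those flags. */
static inline u32 example_wrapper_flags(const struct crypto_alg *inner)
{
	return CRYPTO_ALG_ASYNC |
	       (inner->cra_flags & CRYPTO_ALG_INHERITED_FLAGS);
}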
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 7221def7b9a7..467af525848a 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -934,22 +934,15 @@ static void skcipher_free_instance_simple(struct skcipher_instance *inst)
struct skcipher_instance *skcipher_alloc_instance_simple(
struct crypto_template *tmpl, struct rtattr **tb)
{
- struct crypto_attr_type *algt;
u32 mask;
struct skcipher_instance *inst;
struct crypto_cipher_spawn *spawn;
struct crypto_alg *cipher_alg;
int err;
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return ERR_CAST(algt);
-
- if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
- return ERR_PTR(-EINVAL);
-
- mask = crypto_requires_off(algt->type, algt->mask,
- CRYPTO_ALG_NEED_FALLBACK);
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
+ if (err)
+ return ERR_PTR(err);
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index d29983908c38..b9a2d73d9f8d 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -3916,7 +3916,7 @@ static const struct hash_testvec hmac_sm3_tv_template[] = {
};
/*
- * SHA1 test vectors from from FIPS PUB 180-1
+ * SHA1 test vectors from FIPS PUB 180-1
* Long vector from CAVS 5.0
*/
static const struct hash_testvec sha1_tv_template[] = {
@@ -4103,7 +4103,7 @@ static const struct hash_testvec sha1_tv_template[] = {
/*
- * SHA224 test vectors from from FIPS PUB 180-2
+ * SHA224 test vectors from FIPS PUB 180-2
*/
static const struct hash_testvec sha224_tv_template[] = {
{
@@ -4273,7 +4273,7 @@ static const struct hash_testvec sha224_tv_template[] = {
};
/*
- * SHA256 test vectors from from NIST
+ * SHA256 test vectors from NIST
*/
static const struct hash_testvec sha256_tv_template[] = {
{
@@ -4442,7 +4442,7 @@ static const struct hash_testvec sha256_tv_template[] = {
};
/*
- * SHA384 test vectors from from NIST and kerneli
+ * SHA384 test vectors from NIST and kerneli
*/
static const struct hash_testvec sha384_tv_template[] = {
{
@@ -4632,7 +4632,7 @@ static const struct hash_testvec sha384_tv_template[] = {
};
/*
- * SHA512 test vectors from from NIST and kerneli
+ * SHA512 test vectors from NIST and kerneli
*/
static const struct hash_testvec sha512_tv_template[] = {
{
diff --git a/crypto/vmac.c b/crypto/vmac.c
index 2d906830df96..9b565d1040d6 100644
--- a/crypto/vmac.c
+++ b/crypto/vmac.c
@@ -620,9 +620,10 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
struct shash_instance *inst;
struct crypto_cipher_spawn *spawn;
struct crypto_alg *alg;
+ u32 mask;
int err;
- err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH);
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask);
if (err)
return err;
@@ -632,7 +633,7 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
spawn = shash_instance_ctx(inst);
err = crypto_grab_cipher(spawn, shash_crypto_instance(inst),
- crypto_attr_alg_name(tb[1]), 0, 0);
+ crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;
alg = crypto_spawn_cipher_alg(spawn);
diff --git a/crypto/xcbc.c b/crypto/xcbc.c
index 598ec88abf0f..af3b7eb5d7c7 100644
--- a/crypto/xcbc.c
+++ b/crypto/xcbc.c
@@ -191,9 +191,10 @@ static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
struct crypto_cipher_spawn *spawn;
struct crypto_alg *alg;
unsigned long alignmask;
+ u32 mask;
int err;
- err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH);
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask);
if (err)
return err;
@@ -203,7 +204,7 @@ static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
spawn = shash_instance_ctx(inst);
err = crypto_grab_cipher(spawn, shash_crypto_instance(inst),
- crypto_attr_alg_name(tb[1]), 0, 0);
+ crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;
alg = crypto_spawn_cipher_alg(spawn);
diff --git a/crypto/xts.c b/crypto/xts.c
index 3565f3b863a6..ad45b009774b 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -20,7 +20,7 @@
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
-struct priv {
+struct xts_tfm_ctx {
struct crypto_skcipher *child;
struct crypto_cipher *tweak;
};
@@ -30,17 +30,17 @@ struct xts_instance_ctx {
char name[CRYPTO_MAX_ALG_NAME];
};
-struct rctx {
+struct xts_request_ctx {
le128 t;
struct scatterlist *tail;
struct scatterlist sg[2];
struct skcipher_request subreq;
};
-static int setkey(struct crypto_skcipher *parent, const u8 *key,
- unsigned int keylen)
+static int xts_setkey(struct crypto_skcipher *parent, const u8 *key,
+ unsigned int keylen)
{
- struct priv *ctx = crypto_skcipher_ctx(parent);
+ struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(parent);
struct crypto_skcipher *child;
struct crypto_cipher *tweak;
int err;
@@ -78,9 +78,10 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key,
 * multiple calls to the 'ecb(..)' instance, which usually would be slower than
* just doing the gf128mul_x_ble() calls again.
*/
-static int xor_tweak(struct skcipher_request *req, bool second_pass, bool enc)
+static int xts_xor_tweak(struct skcipher_request *req, bool second_pass,
+ bool enc)
{
- struct rctx *rctx = skcipher_request_ctx(req);
+ struct xts_request_ctx *rctx = skcipher_request_ctx(req);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
const bool cts = (req->cryptlen % XTS_BLOCK_SIZE);
const int bs = XTS_BLOCK_SIZE;
@@ -128,23 +129,23 @@ static int xor_tweak(struct skcipher_request *req, bool second_pass, bool enc)
return err;
}
-static int xor_tweak_pre(struct skcipher_request *req, bool enc)
+static int xts_xor_tweak_pre(struct skcipher_request *req, bool enc)
{
- return xor_tweak(req, false, enc);
+ return xts_xor_tweak(req, false, enc);
}
-static int xor_tweak_post(struct skcipher_request *req, bool enc)
+static int xts_xor_tweak_post(struct skcipher_request *req, bool enc)
{
- return xor_tweak(req, true, enc);
+ return xts_xor_tweak(req, true, enc);
}
-static void cts_done(struct crypto_async_request *areq, int err)
+static void xts_cts_done(struct crypto_async_request *areq, int err)
{
struct skcipher_request *req = areq->data;
le128 b;
if (!err) {
- struct rctx *rctx = skcipher_request_ctx(req);
+ struct xts_request_ctx *rctx = skcipher_request_ctx(req);
scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
le128_xor(&b, &rctx->t, &b);
@@ -154,12 +155,13 @@ static void cts_done(struct crypto_async_request *areq, int err)
skcipher_request_complete(req, err);
}
-static int cts_final(struct skcipher_request *req,
- int (*crypt)(struct skcipher_request *req))
+static int xts_cts_final(struct skcipher_request *req,
+ int (*crypt)(struct skcipher_request *req))
{
- struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+ const struct xts_tfm_ctx *ctx =
+ crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
int offset = req->cryptlen & ~(XTS_BLOCK_SIZE - 1);
- struct rctx *rctx = skcipher_request_ctx(req);
+ struct xts_request_ctx *rctx = skcipher_request_ctx(req);
struct skcipher_request *subreq = &rctx->subreq;
int tail = req->cryptlen % XTS_BLOCK_SIZE;
le128 b[2];
@@ -169,7 +171,7 @@ static int cts_final(struct skcipher_request *req,
offset - XTS_BLOCK_SIZE);
scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
- memcpy(b + 1, b, tail);
+ b[1] = b[0];
scatterwalk_map_and_copy(b, req->src, offset, tail, 0);
le128_xor(b, &rctx->t, b);
@@ -177,7 +179,8 @@ static int cts_final(struct skcipher_request *req,
scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE + tail, 1);
skcipher_request_set_tfm(subreq, ctx->child);
- skcipher_request_set_callback(subreq, req->base.flags, cts_done, req);
+ skcipher_request_set_callback(subreq, req->base.flags, xts_cts_done,
+ req);
skcipher_request_set_crypt(subreq, rctx->tail, rctx->tail,
XTS_BLOCK_SIZE, NULL);
@@ -192,18 +195,18 @@ static int cts_final(struct skcipher_request *req,
return 0;
}
-static void encrypt_done(struct crypto_async_request *areq, int err)
+static void xts_encrypt_done(struct crypto_async_request *areq, int err)
{
struct skcipher_request *req = areq->data;
if (!err) {
- struct rctx *rctx = skcipher_request_ctx(req);
+ struct xts_request_ctx *rctx = skcipher_request_ctx(req);
rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- err = xor_tweak_post(req, true);
+ err = xts_xor_tweak_post(req, true);
if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
- err = cts_final(req, crypto_skcipher_encrypt);
+ err = xts_cts_final(req, crypto_skcipher_encrypt);
if (err == -EINPROGRESS)
return;
}
@@ -212,18 +215,18 @@ static void encrypt_done(struct crypto_async_request *areq, int err)
skcipher_request_complete(req, err);
}
-static void decrypt_done(struct crypto_async_request *areq, int err)
+static void xts_decrypt_done(struct crypto_async_request *areq, int err)
{
struct skcipher_request *req = areq->data;
if (!err) {
- struct rctx *rctx = skcipher_request_ctx(req);
+ struct xts_request_ctx *rctx = skcipher_request_ctx(req);
rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- err = xor_tweak_post(req, false);
+ err = xts_xor_tweak_post(req, false);
if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
- err = cts_final(req, crypto_skcipher_decrypt);
+ err = xts_cts_final(req, crypto_skcipher_decrypt);
if (err == -EINPROGRESS)
return;
}
@@ -232,10 +235,12 @@ static void decrypt_done(struct crypto_async_request *areq, int err)
skcipher_request_complete(req, err);
}
-static int init_crypt(struct skcipher_request *req, crypto_completion_t compl)
+static int xts_init_crypt(struct skcipher_request *req,
+ crypto_completion_t compl)
{
- struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
- struct rctx *rctx = skcipher_request_ctx(req);
+ const struct xts_tfm_ctx *ctx =
+ crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+ struct xts_request_ctx *rctx = skcipher_request_ctx(req);
struct skcipher_request *subreq = &rctx->subreq;
if (req->cryptlen < XTS_BLOCK_SIZE)
@@ -252,45 +257,45 @@ static int init_crypt(struct skcipher_request *req, crypto_completion_t compl)
return 0;
}
-static int encrypt(struct skcipher_request *req)
+static int xts_encrypt(struct skcipher_request *req)
{
- struct rctx *rctx = skcipher_request_ctx(req);
+ struct xts_request_ctx *rctx = skcipher_request_ctx(req);
struct skcipher_request *subreq = &rctx->subreq;
int err;
- err = init_crypt(req, encrypt_done) ?:
- xor_tweak_pre(req, true) ?:
+ err = xts_init_crypt(req, xts_encrypt_done) ?:
+ xts_xor_tweak_pre(req, true) ?:
crypto_skcipher_encrypt(subreq) ?:
- xor_tweak_post(req, true);
+ xts_xor_tweak_post(req, true);
if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
return err;
- return cts_final(req, crypto_skcipher_encrypt);
+ return xts_cts_final(req, crypto_skcipher_encrypt);
}
-static int decrypt(struct skcipher_request *req)
+static int xts_decrypt(struct skcipher_request *req)
{
- struct rctx *rctx = skcipher_request_ctx(req);
+ struct xts_request_ctx *rctx = skcipher_request_ctx(req);
struct skcipher_request *subreq = &rctx->subreq;
int err;
- err = init_crypt(req, decrypt_done) ?:
- xor_tweak_pre(req, false) ?:
+ err = xts_init_crypt(req, xts_decrypt_done) ?:
+ xts_xor_tweak_pre(req, false) ?:
crypto_skcipher_decrypt(subreq) ?:
- xor_tweak_post(req, false);
+ xts_xor_tweak_post(req, false);
if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
return err;
- return cts_final(req, crypto_skcipher_decrypt);
+ return xts_cts_final(req, crypto_skcipher_decrypt);
}
-static int init_tfm(struct crypto_skcipher *tfm)
+static int xts_init_tfm(struct crypto_skcipher *tfm)
{
struct skcipher_instance *inst = skcipher_alg_instance(tfm);
struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);
- struct priv *ctx = crypto_skcipher_ctx(tfm);
+ struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
struct crypto_skcipher *child;
struct crypto_cipher *tweak;
@@ -309,41 +314,39 @@ static int init_tfm(struct crypto_skcipher *tfm)
ctx->tweak = tweak;
crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) +
- sizeof(struct rctx));
+ sizeof(struct xts_request_ctx));
return 0;
}
-static void exit_tfm(struct crypto_skcipher *tfm)
+static void xts_exit_tfm(struct crypto_skcipher *tfm)
{
- struct priv *ctx = crypto_skcipher_ctx(tfm);
+ struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
crypto_free_skcipher(ctx->child);
crypto_free_cipher(ctx->tweak);
}
-static void crypto_xts_free(struct skcipher_instance *inst)
+static void xts_free_instance(struct skcipher_instance *inst)
{
- crypto_drop_skcipher(skcipher_instance_ctx(inst));
+ struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);
+
+ crypto_drop_skcipher(&ictx->spawn);
kfree(inst);
}
-static int create(struct crypto_template *tmpl, struct rtattr **tb)
+static int xts_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct skcipher_instance *inst;
- struct crypto_attr_type *algt;
struct xts_instance_ctx *ctx;
struct skcipher_alg *alg;
const char *cipher_name;
u32 mask;
int err;
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return PTR_ERR(algt);
-
- if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
- return -EINVAL;
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
+ if (err)
+ return err;
cipher_name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(cipher_name))
@@ -355,10 +358,6 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
ctx = skcipher_instance_ctx(inst);
- mask = crypto_requires_off(algt->type, algt->mask,
- CRYPTO_ALG_NEED_FALLBACK |
- CRYPTO_ALG_ASYNC);
-
err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
cipher_name, 0, mask);
if (err == -ENOENT) {
@@ -415,7 +414,6 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
} else
goto err_free_inst;
- inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = alg->base.cra_priority;
inst->alg.base.cra_blocksize = XTS_BLOCK_SIZE;
inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
@@ -425,43 +423,43 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) * 2;
inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) * 2;
- inst->alg.base.cra_ctxsize = sizeof(struct priv);
+ inst->alg.base.cra_ctxsize = sizeof(struct xts_tfm_ctx);
- inst->alg.init = init_tfm;
- inst->alg.exit = exit_tfm;
+ inst->alg.init = xts_init_tfm;
+ inst->alg.exit = xts_exit_tfm;
- inst->alg.setkey = setkey;
- inst->alg.encrypt = encrypt;
- inst->alg.decrypt = decrypt;
+ inst->alg.setkey = xts_setkey;
+ inst->alg.encrypt = xts_encrypt;
+ inst->alg.decrypt = xts_decrypt;
- inst->free = crypto_xts_free;
+ inst->free = xts_free_instance;
err = skcipher_register_instance(tmpl, inst);
if (err) {
err_free_inst:
- crypto_xts_free(inst);
+ xts_free_instance(inst);
}
return err;
}
-static struct crypto_template crypto_tmpl = {
+static struct crypto_template xts_tmpl = {
.name = "xts",
- .create = create,
+ .create = xts_create,
.module = THIS_MODULE,
};
-static int __init crypto_module_init(void)
+static int __init xts_module_init(void)
{
- return crypto_register_template(&crypto_tmpl);
+ return crypto_register_template(&xts_tmpl);
}
-static void __exit crypto_module_exit(void)
+static void __exit xts_module_exit(void)
{
- crypto_unregister_template(&crypto_tmpl);
+ crypto_unregister_template(&xts_tmpl);
}
-subsys_initcall(crypto_module_init);
-module_exit(crypto_module_exit);
+subsys_initcall(xts_module_init);
+module_exit(xts_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("XTS block cipher mode");