author    Linus Torvalds <torvalds@linux-foundation.org>  2018-02-01 01:22:45 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-02-01 01:22:45 +0300
commit    a103950e0dd2058df5e8a8d4a915707bdcf205f0 (patch)
tree      af5d091f768db4ed7a12fc3c5484d3e20ad9d514 /crypto
parent    2cfa1cd3da14814a1e9ec6a4fce8612637d3ee3d (diff)
parent    2d55807b7f7bf62bb05a8b91247c5eb7cd19ac04 (diff)
download  linux-a103950e0dd2058df5e8a8d4a915707bdcf205f0.tar.xz
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "API:
   - Enforce the setting of keys for keyed aead/hash/skcipher algorithms.
   - Add multibuf speed tests in tcrypt.

  Algorithms:
   - Improve performance of sha3-generic.
   - Add native sha512 support on arm64.
   - Add v8.2 Crypto Extensions version of sha3/sm3 on arm64.
   - Avoid hmac nesting by requiring underlying algorithm to be unkeyed.
   - Add cryptd_max_cpu_qlen module parameter to cryptd.

  Drivers:
   - Add support for EIP97 engine in inside-secure.
   - Add inline IPsec support to chelsio.
   - Add RevB core support to crypto4xx.
   - Fix AEAD ICV check in crypto4xx.
   - Add stm32 crypto driver.
   - Add support for BCM63xx platforms in bcm2835 and remove bcm63xx.
   - Add Derived Key Protocol (DKP) support in caam.
   - Add Samsung Exynos True RNG driver.
   - Add support for Exynos5250+ SoCs in exynos PRNG driver"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (166 commits)
  crypto: picoxcell - Fix error handling in spacc_probe()
  crypto: arm64/sha512 - fix/improve new v8.2 Crypto Extensions code
  crypto: arm64/sm3 - new v8.2 Crypto Extensions implementation
  crypto: arm64/sha3 - new v8.2 Crypto Extensions implementation
  crypto: testmgr - add new testcases for sha3
  crypto: sha3-generic - export init/update/final routines
  crypto: sha3-generic - simplify code
  crypto: sha3-generic - rewrite KECCAK transform to help the compiler optimize
  crypto: sha3-generic - fixes for alignment and big endian operation
  crypto: aesni - handle zero length dst buffer
  crypto: artpec6 - remove select on non-existing CRYPTO_SHA384
  hwrng: bcm2835 - Remove redundant dev_err call in bcm2835_rng_probe()
  crypto: stm32 - remove redundant dev_err call in stm32_cryp_probe()
  crypto: axis - remove unnecessary platform_get_resource() error check
  crypto: testmgr - test misuse of result in ahash
  crypto: inside-secure - make function safexcel_try_push_requests static
  crypto: aes-generic - fix aes-generic regression on powerpc
  crypto: chelsio - Fix indentation warning
  crypto: arm64/sha1-ce - get rid of literal pool
  crypto: arm64/sha2-ce - move the round constant table to .rodata section
  ...
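To make the key-enforcement change above concrete, here is a minimal, hypothetical in-kernel sketch (not part of this pull; module name and flow are illustrative): a keyed ahash such as "hmac(sha256)" now starts out with CRYPTO_TFM_NEED_KEY set, so crypto_ahash_digest() returns -ENOKEY until a successful crypto_ahash_setkey() clears the flag (see the crypto/ahash.c hunks below).

#include <crypto/hash.h>
#include <crypto/sha.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/scatterlist.h>

static int __init needkey_demo_init(void)
{
	static const u8 key[32], msg[16];
	u8 out[SHA256_DIGEST_SIZE];
	DECLARE_CRYPTO_WAIT(wait);
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, msg, sizeof(msg));
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, sizeof(msg));

	/* No key set yet: CRYPTO_TFM_NEED_KEY makes this fail. */
	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
	WARN_ON(err != -ENOKEY);

	/* A successful setkey clears the flag and digest() proceeds. */
	err = crypto_ahash_setkey(tfm, key, sizeof(key));
	if (!err)
		err = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}

static void __exit needkey_demo_exit(void)
{
}

module_init(needkey_demo_init);
module_exit(needkey_demo_exit);
MODULE_LICENSE("GPL");

The same NEED_KEY pattern is applied to aead and skcipher tfms in this series, which is what lets the algif_aead/algif_hash/algif_skcipher hunks below drop their private has_key bookkeeping.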
Diffstat (limited to 'crypto')
-rw-r--r--  crypto/Kconfig                4
-rw-r--r--  crypto/Makefile               1
-rw-r--r--  crypto/ablk_helper.c          5
-rw-r--r--  crypto/aead.c                19
-rw-r--r--  crypto/af_alg.c              10
-rw-r--r--  crypto/ahash.c               33
-rw-r--r--  crypto/algapi.c              13
-rw-r--r--  crypto/algif_aead.c          15
-rw-r--r--  crypto/algif_hash.c          52
-rw-r--r--  crypto/algif_skcipher.c      59
-rw-r--r--  crypto/api.c                  6
-rw-r--r--  crypto/authenc.c              4
-rw-r--r--  crypto/authencesn.c           4
-rw-r--r--  crypto/blkcipher.c            1
-rw-r--r--  crypto/camellia_generic.c     3
-rw-r--r--  crypto/cast5_generic.c        3
-rw-r--r--  crypto/cast6_generic.c        3
-rw-r--r--  crypto/chacha20_generic.c    33
-rw-r--r--  crypto/crc32_generic.c        1
-rw-r--r--  crypto/crc32c_generic.c       1
-rw-r--r--  crypto/cryptd.c              17
-rw-r--r--  crypto/crypto_user.c          4
-rw-r--r--  crypto/ecc.c                  2
-rw-r--r--  crypto/echainiv.c             5
-rw-r--r--  crypto/gcm.c                  4
-rw-r--r--  crypto/gf128mul.c             2
-rw-r--r--  crypto/ghash-generic.c        6
-rw-r--r--  crypto/internal.h             8
-rw-r--r--  crypto/keywrap.c              4
-rw-r--r--  crypto/mcryptd.c             11
-rw-r--r--  crypto/poly1305_generic.c    27
-rw-r--r--  crypto/proc.c                 2
-rw-r--r--  crypto/salsa20_generic.c    240
-rw-r--r--  crypto/seqiv.c                5
-rw-r--r--  crypto/sha3_generic.c       332
-rw-r--r--  crypto/shash.c               25
-rw-r--r--  crypto/simd.c                 4
-rw-r--r--  crypto/skcipher.c            30
-rw-r--r--  crypto/tcrypt.c            1083
-rw-r--r--  crypto/testmgr.c             41
-rw-r--r--  crypto/testmgr.h            550
-rw-r--r--  crypto/twofish_common.c       5
-rw-r--r--  crypto/twofish_generic.c      5
-rw-r--r--  crypto/xcbc.c                 3
44 files changed, 2032 insertions, 653 deletions
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 20360e040425..b75264b09a46 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -131,7 +131,7 @@ config CRYPTO_DH
config CRYPTO_ECDH
tristate "ECDH algorithm"
- select CRYTPO_KPP
+ select CRYPTO_KPP
select CRYPTO_RNG_DEFAULT
help
Generic implementation of the ECDH algorithm
@@ -1340,6 +1340,7 @@ config CRYPTO_SALSA20_586
tristate "Salsa20 stream cipher algorithm (i586)"
depends on (X86 || UML_X86) && !64BIT
select CRYPTO_BLKCIPHER
+ select CRYPTO_SALSA20
help
Salsa20 stream cipher algorithm.
@@ -1353,6 +1354,7 @@ config CRYPTO_SALSA20_X86_64
tristate "Salsa20 stream cipher algorithm (x86_64)"
depends on (X86 || UML_X86) && 64BIT
select CRYPTO_BLKCIPHER
+ select CRYPTO_SALSA20
help
Salsa20 stream cipher algorithm.
diff --git a/crypto/Makefile b/crypto/Makefile
index d674884b2d51..cdbc03b35510 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -99,6 +99,7 @@ obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o
obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o
CFLAGS_serpent_generic.o := $(call cc-option,-fsched-pressure) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
obj-$(CONFIG_CRYPTO_AES) += aes_generic.o
+CFLAGS_aes_generic.o := $(call cc-option,-fno-code-hoisting) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83356
obj-$(CONFIG_CRYPTO_AES_TI) += aes_ti.o
obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o
obj-$(CONFIG_CRYPTO_CAST_COMMON) += cast_common.o
diff --git a/crypto/ablk_helper.c b/crypto/ablk_helper.c
index 1441f07d0a19..09776bb1360e 100644
--- a/crypto/ablk_helper.c
+++ b/crypto/ablk_helper.c
@@ -18,9 +18,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
@@ -28,7 +26,6 @@
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/hardirq.h>
#include <crypto/algapi.h>
#include <crypto/cryptd.h>
#include <crypto/ablk_helper.h>
diff --git a/crypto/aead.c b/crypto/aead.c
index f794b30a9407..60b3bbe973e7 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -54,11 +54,18 @@ int crypto_aead_setkey(struct crypto_aead *tfm,
const u8 *key, unsigned int keylen)
{
unsigned long alignmask = crypto_aead_alignmask(tfm);
+ int err;
if ((unsigned long)key & alignmask)
- return setkey_unaligned(tfm, key, keylen);
+ err = setkey_unaligned(tfm, key, keylen);
+ else
+ err = crypto_aead_alg(tfm)->setkey(tfm, key, keylen);
+
+ if (err)
+ return err;
- return crypto_aead_alg(tfm)->setkey(tfm, key, keylen);
+ crypto_aead_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
+ return 0;
}
EXPORT_SYMBOL_GPL(crypto_aead_setkey);
@@ -93,6 +100,8 @@ static int crypto_aead_init_tfm(struct crypto_tfm *tfm)
struct crypto_aead *aead = __crypto_aead_cast(tfm);
struct aead_alg *alg = crypto_aead_alg(aead);
+ crypto_aead_set_flags(aead, CRYPTO_TFM_NEED_KEY);
+
aead->authsize = alg->maxauthsize;
if (alg->exit)
@@ -295,7 +304,7 @@ int aead_init_geniv(struct crypto_aead *aead)
if (err)
goto out;
- ctx->sknull = crypto_get_default_null_skcipher2();
+ ctx->sknull = crypto_get_default_null_skcipher();
err = PTR_ERR(ctx->sknull);
if (IS_ERR(ctx->sknull))
goto out;
@@ -315,7 +324,7 @@ out:
return err;
drop_null:
- crypto_put_default_null_skcipher2();
+ crypto_put_default_null_skcipher();
goto out;
}
EXPORT_SYMBOL_GPL(aead_init_geniv);
@@ -325,7 +334,7 @@ void aead_exit_geniv(struct crypto_aead *tfm)
struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm);
crypto_free_aead(ctx->child);
- crypto_put_default_null_skcipher2();
+ crypto_put_default_null_skcipher();
}
EXPORT_SYMBOL_GPL(aead_exit_geniv);
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index f41047ab60f5..0f8d8d5523c3 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -150,7 +150,7 @@ EXPORT_SYMBOL_GPL(af_alg_release_parent);
static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
- const u32 forbidden = CRYPTO_ALG_INTERNAL;
+ const u32 allowed = CRYPTO_ALG_KERN_DRIVER_ONLY;
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct sockaddr_alg *sa = (void *)uaddr;
@@ -158,6 +158,10 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
void *private;
int err;
+ /* If caller uses non-allowed flag, return error. */
+ if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed))
+ return -EINVAL;
+
if (sock->state == SS_CONNECTED)
return -EINVAL;
@@ -176,9 +180,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
if (IS_ERR(type))
return PTR_ERR(type);
- private = type->bind(sa->salg_name,
- sa->salg_feat & ~forbidden,
- sa->salg_mask & ~forbidden);
+ private = type->bind(sa->salg_name, sa->salg_feat, sa->salg_mask);
if (IS_ERR(private)) {
module_put(type->owner);
return PTR_ERR(private);
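The alg_bind() change above inverts the flag policy: instead of silently masking off forbidden bits such as CRYPTO_ALG_INTERNAL, the kernel now rejects any salg_feat/salg_mask bit other than CRYPTO_ALG_KERN_DRIVER_ONLY. A hypothetical userspace snippet (not from the patch) demonstrating the new failure mode:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",
		.salg_name   = "sha256",
		/* 0x2000 == CRYPTO_ALG_INTERNAL: previously masked off,
		 * now rejected outright with EINVAL. */
		.salg_feat   = 0x2000,
	};
	int fd = socket(AF_ALG, SOCK_SEQPACKET, 0);

	if (fd < 0)
		return 1;
	if (bind(fd, (struct sockaddr *)&sa, sizeof(sa)) != 0)
		printf("bind: %s\n", strerror(errno)); /* Invalid argument */
	close(fd);
	return 0;
}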
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 3a35d67de7d9..266fc1d64f61 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -193,11 +193,18 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
unsigned long alignmask = crypto_ahash_alignmask(tfm);
+ int err;
if ((unsigned long)key & alignmask)
- return ahash_setkey_unaligned(tfm, key, keylen);
+ err = ahash_setkey_unaligned(tfm, key, keylen);
+ else
+ err = tfm->setkey(tfm, key, keylen);
+
+ if (err)
+ return err;
- return tfm->setkey(tfm, key, keylen);
+ crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
+ return 0;
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
@@ -368,7 +375,12 @@ EXPORT_SYMBOL_GPL(crypto_ahash_finup);
int crypto_ahash_digest(struct ahash_request *req)
{
- return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+
+ if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
+ return crypto_ahash_op(req, tfm->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
@@ -450,7 +462,6 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
struct ahash_alg *alg = crypto_ahash_alg(hash);
hash->setkey = ahash_nosetkey;
- hash->has_setkey = false;
hash->export = ahash_no_export;
hash->import = ahash_no_import;
@@ -465,7 +476,8 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
if (alg->setkey) {
hash->setkey = alg->setkey;
- hash->has_setkey = true;
+ if (!(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
+ crypto_ahash_set_flags(hash, CRYPTO_TFM_NEED_KEY);
}
if (alg->export)
hash->export = alg->export;
@@ -649,5 +661,16 @@ struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);
+bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
+{
+ struct crypto_alg *alg = &halg->base;
+
+ if (alg->cra_type != &crypto_ahash_type)
+ return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));
+
+ return __crypto_ahash_alg(alg)->setkey != NULL;
+}
+EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 9a636f961572..395b082d03a9 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -62,7 +62,7 @@ static int crypto_check_alg(struct crypto_alg *alg)
if (alg->cra_priority < 0)
return -EINVAL;
- atomic_set(&alg->cra_refcnt, 1);
+ refcount_set(&alg->cra_refcnt, 1);
return crypto_set_driver_name(alg);
}
@@ -123,7 +123,6 @@ static void crypto_remove_instance(struct crypto_instance *inst,
if (!tmpl || !crypto_tmpl_get(tmpl))
return;
- crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, &inst->alg);
list_move(&inst->alg.cra_list, list);
hlist_del(&inst->list);
inst->alg.cra_destroy = crypto_destroy_instance;
@@ -236,7 +235,7 @@ static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg)
if (!larval->adult)
goto free_larval;
- atomic_set(&larval->alg.cra_refcnt, 1);
+ refcount_set(&larval->alg.cra_refcnt, 1);
memcpy(larval->alg.cra_driver_name, alg->cra_driver_name,
CRYPTO_MAX_ALG_NAME);
larval->alg.cra_priority = alg->cra_priority;
@@ -392,7 +391,6 @@ static int crypto_remove_alg(struct crypto_alg *alg, struct list_head *list)
alg->cra_flags |= CRYPTO_ALG_DEAD;
- crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, alg);
list_del_init(&alg->cra_list);
crypto_remove_spawns(alg, list, NULL);
@@ -411,7 +409,7 @@ int crypto_unregister_alg(struct crypto_alg *alg)
if (ret)
return ret;
- BUG_ON(atomic_read(&alg->cra_refcnt) != 1);
+ BUG_ON(refcount_read(&alg->cra_refcnt) != 1);
if (alg->cra_destroy)
alg->cra_destroy(alg);
@@ -470,7 +468,6 @@ int crypto_register_template(struct crypto_template *tmpl)
}
list_add(&tmpl->list, &crypto_template_list);
- crypto_notify(CRYPTO_MSG_TMPL_REGISTER, tmpl);
err = 0;
out:
up_write(&crypto_alg_sem);
@@ -497,12 +494,10 @@ void crypto_unregister_template(struct crypto_template *tmpl)
BUG_ON(err);
}
- crypto_notify(CRYPTO_MSG_TMPL_UNREGISTER, tmpl);
-
up_write(&crypto_alg_sem);
hlist_for_each_entry_safe(inst, n, list, list) {
- BUG_ON(atomic_read(&inst->alg.cra_refcnt) != 1);
+ BUG_ON(refcount_read(&inst->alg.cra_refcnt) != 1);
crypto_free_instance(inst);
}
crypto_remove_final(&users);
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index e9885a35ef6e..4b07edd5a9ff 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -42,7 +42,6 @@
struct aead_tfm {
struct crypto_aead *aead;
- bool has_key;
struct crypto_skcipher *null_tfm;
};
@@ -398,7 +397,7 @@ static int aead_check_key(struct socket *sock)
err = -ENOKEY;
lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
- if (!tfm->has_key)
+ if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
goto unlock;
if (!pask->refcnt++)
@@ -491,7 +490,7 @@ static void *aead_bind(const char *name, u32 type, u32 mask)
return ERR_CAST(aead);
}
- null_tfm = crypto_get_default_null_skcipher2();
+ null_tfm = crypto_get_default_null_skcipher();
if (IS_ERR(null_tfm)) {
crypto_free_aead(aead);
kfree(tfm);
@@ -509,7 +508,7 @@ static void aead_release(void *private)
struct aead_tfm *tfm = private;
crypto_free_aead(tfm->aead);
- crypto_put_default_null_skcipher2();
+ crypto_put_default_null_skcipher();
kfree(tfm);
}
@@ -523,12 +522,8 @@ static int aead_setauthsize(void *private, unsigned int authsize)
static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
{
struct aead_tfm *tfm = private;
- int err;
-
- err = crypto_aead_setkey(tfm->aead, key, keylen);
- tfm->has_key = !err;
- return err;
+ return crypto_aead_setkey(tfm->aead, key, keylen);
}
static void aead_sock_destruct(struct sock *sk)
@@ -589,7 +584,7 @@ static int aead_accept_parent(void *private, struct sock *sk)
{
struct aead_tfm *tfm = private;
- if (!tfm->has_key)
+ if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
return aead_accept_parent_nokey(private, sk);
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 76d2e716c792..6c9b1927a520 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -34,11 +34,6 @@ struct hash_ctx {
struct ahash_request req;
};
-struct algif_hash_tfm {
- struct crypto_ahash *hash;
- bool has_key;
-};
-
static int hash_alloc_result(struct sock *sk, struct hash_ctx *ctx)
{
unsigned ds;
@@ -307,7 +302,7 @@ static int hash_check_key(struct socket *sock)
int err = 0;
struct sock *psk;
struct alg_sock *pask;
- struct algif_hash_tfm *tfm;
+ struct crypto_ahash *tfm;
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
@@ -321,7 +316,7 @@ static int hash_check_key(struct socket *sock)
err = -ENOKEY;
lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
- if (!tfm->has_key)
+ if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
goto unlock;
if (!pask->refcnt++)
@@ -412,41 +407,17 @@ static struct proto_ops algif_hash_ops_nokey = {
static void *hash_bind(const char *name, u32 type, u32 mask)
{
- struct algif_hash_tfm *tfm;
- struct crypto_ahash *hash;
-
- tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
- if (!tfm)
- return ERR_PTR(-ENOMEM);
-
- hash = crypto_alloc_ahash(name, type, mask);
- if (IS_ERR(hash)) {
- kfree(tfm);
- return ERR_CAST(hash);
- }
-
- tfm->hash = hash;
-
- return tfm;
+ return crypto_alloc_ahash(name, type, mask);
}
static void hash_release(void *private)
{
- struct algif_hash_tfm *tfm = private;
-
- crypto_free_ahash(tfm->hash);
- kfree(tfm);
+ crypto_free_ahash(private);
}
static int hash_setkey(void *private, const u8 *key, unsigned int keylen)
{
- struct algif_hash_tfm *tfm = private;
- int err;
-
- err = crypto_ahash_setkey(tfm->hash, key, keylen);
- tfm->has_key = !err;
-
- return err;
+ return crypto_ahash_setkey(private, key, keylen);
}
static void hash_sock_destruct(struct sock *sk)
@@ -461,11 +432,10 @@ static void hash_sock_destruct(struct sock *sk)
static int hash_accept_parent_nokey(void *private, struct sock *sk)
{
- struct hash_ctx *ctx;
+ struct crypto_ahash *tfm = private;
struct alg_sock *ask = alg_sk(sk);
- struct algif_hash_tfm *tfm = private;
- struct crypto_ahash *hash = tfm->hash;
- unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(hash);
+ struct hash_ctx *ctx;
+ unsigned int len = sizeof(*ctx) + crypto_ahash_reqsize(tfm);
ctx = sock_kmalloc(sk, len, GFP_KERNEL);
if (!ctx)
@@ -478,7 +448,7 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk)
ask->private = ctx;
- ahash_request_set_tfm(&ctx->req, hash);
+ ahash_request_set_tfm(&ctx->req, tfm);
ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &ctx->wait);
@@ -489,9 +459,9 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk)
static int hash_accept_parent(void *private, struct sock *sk)
{
- struct algif_hash_tfm *tfm = private;
+ struct crypto_ahash *tfm = private;
- if (!tfm->has_key && crypto_ahash_has_setkey(tfm->hash))
+ if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
return hash_accept_parent_nokey(private, sk);
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index f50907430c92..c4e885df4564 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -38,11 +38,6 @@
#include <linux/net.h>
#include <net/sock.h>
-struct skcipher_tfm {
- struct crypto_skcipher *skcipher;
- bool has_key;
-};
-
static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
size_t size)
{
@@ -50,8 +45,7 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
struct alg_sock *ask = alg_sk(sk);
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
- struct skcipher_tfm *skc = pask->private;
- struct crypto_skcipher *tfm = skc->skcipher;
+ struct crypto_skcipher *tfm = pask->private;
unsigned ivsize = crypto_skcipher_ivsize(tfm);
return af_alg_sendmsg(sock, msg, size, ivsize);
@@ -65,8 +59,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
struct af_alg_ctx *ctx = ask->private;
- struct skcipher_tfm *skc = pask->private;
- struct crypto_skcipher *tfm = skc->skcipher;
+ struct crypto_skcipher *tfm = pask->private;
unsigned int bs = crypto_skcipher_blocksize(tfm);
struct af_alg_async_req *areq;
int err = 0;
@@ -220,7 +213,7 @@ static int skcipher_check_key(struct socket *sock)
int err = 0;
struct sock *psk;
struct alg_sock *pask;
- struct skcipher_tfm *tfm;
+ struct crypto_skcipher *tfm;
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
@@ -234,7 +227,7 @@ static int skcipher_check_key(struct socket *sock)
err = -ENOKEY;
lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
- if (!tfm->has_key)
+ if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
goto unlock;
if (!pask->refcnt++)
@@ -313,41 +306,17 @@ static struct proto_ops algif_skcipher_ops_nokey = {
static void *skcipher_bind(const char *name, u32 type, u32 mask)
{
- struct skcipher_tfm *tfm;
- struct crypto_skcipher *skcipher;
-
- tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
- if (!tfm)
- return ERR_PTR(-ENOMEM);
-
- skcipher = crypto_alloc_skcipher(name, type, mask);
- if (IS_ERR(skcipher)) {
- kfree(tfm);
- return ERR_CAST(skcipher);
- }
-
- tfm->skcipher = skcipher;
-
- return tfm;
+ return crypto_alloc_skcipher(name, type, mask);
}
static void skcipher_release(void *private)
{
- struct skcipher_tfm *tfm = private;
-
- crypto_free_skcipher(tfm->skcipher);
- kfree(tfm);
+ crypto_free_skcipher(private);
}
static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
{
- struct skcipher_tfm *tfm = private;
- int err;
-
- err = crypto_skcipher_setkey(tfm->skcipher, key, keylen);
- tfm->has_key = !err;
-
- return err;
+ return crypto_skcipher_setkey(private, key, keylen);
}
static void skcipher_sock_destruct(struct sock *sk)
@@ -356,8 +325,7 @@ static void skcipher_sock_destruct(struct sock *sk)
struct af_alg_ctx *ctx = ask->private;
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
- struct skcipher_tfm *skc = pask->private;
- struct crypto_skcipher *tfm = skc->skcipher;
+ struct crypto_skcipher *tfm = pask->private;
af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
@@ -369,22 +337,21 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
{
struct af_alg_ctx *ctx;
struct alg_sock *ask = alg_sk(sk);
- struct skcipher_tfm *tfm = private;
- struct crypto_skcipher *skcipher = tfm->skcipher;
+ struct crypto_skcipher *tfm = private;
unsigned int len = sizeof(*ctx);
ctx = sock_kmalloc(sk, len, GFP_KERNEL);
if (!ctx)
return -ENOMEM;
- ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(skcipher),
+ ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(tfm),
GFP_KERNEL);
if (!ctx->iv) {
sock_kfree_s(sk, ctx, len);
return -ENOMEM;
}
- memset(ctx->iv, 0, crypto_skcipher_ivsize(skcipher));
+ memset(ctx->iv, 0, crypto_skcipher_ivsize(tfm));
INIT_LIST_HEAD(&ctx->tsgl_list);
ctx->len = len;
@@ -404,9 +371,9 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
static int skcipher_accept_parent(void *private, struct sock *sk)
{
- struct skcipher_tfm *tfm = private;
+ struct crypto_skcipher *tfm = private;
- if (!tfm->has_key && crypto_skcipher_has_setkey(tfm->skcipher))
+ if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
return skcipher_accept_parent_nokey(private, sk);
diff --git a/crypto/api.c b/crypto/api.c
index 2a2479d168aa..70a894e52ff3 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -137,7 +137,7 @@ static struct crypto_alg *crypto_larval_add(const char *name, u32 type,
if (IS_ERR(larval))
return ERR_CAST(larval);
- atomic_set(&larval->alg.cra_refcnt, 2);
+ refcount_set(&larval->alg.cra_refcnt, 2);
down_write(&crypto_alg_sem);
alg = __crypto_alg_lookup(name, type, mask);
@@ -205,7 +205,8 @@ struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, u32 mask)
}
EXPORT_SYMBOL_GPL(crypto_alg_lookup);
-struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask)
+static struct crypto_alg *crypto_larval_lookup(const char *name, u32 type,
+ u32 mask)
{
struct crypto_alg *alg;
@@ -231,7 +232,6 @@ struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask)
return crypto_larval_add(name, type, mask);
}
-EXPORT_SYMBOL_GPL(crypto_larval_lookup);
int crypto_probing_notify(unsigned long val, void *v)
{
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 875470b0e026..d3d6d72fe649 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -329,7 +329,7 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
if (IS_ERR(enc))
goto err_free_ahash;
- null = crypto_get_default_null_skcipher2();
+ null = crypto_get_default_null_skcipher();
err = PTR_ERR(null);
if (IS_ERR(null))
goto err_free_skcipher;
@@ -363,7 +363,7 @@ static void crypto_authenc_exit_tfm(struct crypto_aead *tfm)
crypto_free_ahash(ctx->auth);
crypto_free_skcipher(ctx->enc);
- crypto_put_default_null_skcipher2();
+ crypto_put_default_null_skcipher();
}
static void crypto_authenc_free(struct aead_instance *inst)
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index 0cf5fefdb859..15f91ddd7f0e 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -352,7 +352,7 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
if (IS_ERR(enc))
goto err_free_ahash;
- null = crypto_get_default_null_skcipher2();
+ null = crypto_get_default_null_skcipher();
err = PTR_ERR(null);
if (IS_ERR(null))
goto err_free_skcipher;
@@ -389,7 +389,7 @@ static void crypto_authenc_esn_exit_tfm(struct crypto_aead *tfm)
crypto_free_ahash(ctx->auth);
crypto_free_skcipher(ctx->enc);
- crypto_put_default_null_skcipher2();
+ crypto_put_default_null_skcipher();
}
static void crypto_authenc_esn_free(struct aead_instance *inst)
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 6c43a0a17a55..01c0d4aa2563 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -18,7 +18,6 @@
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/errno.h>
-#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
diff --git a/crypto/camellia_generic.c b/crypto/camellia_generic.c
index a02286bf319e..32ddd4836ff5 100644
--- a/crypto/camellia_generic.c
+++ b/crypto/camellia_generic.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/crypto/cast5_generic.c b/crypto/cast5_generic.c
index df5c72629383..66169c178314 100644
--- a/crypto/cast5_generic.c
+++ b/crypto/cast5_generic.c
@@ -16,8 +16,7 @@
* any later version.
*
* You should have received a copy of the GNU General Public License
-* along with this program; if not, write to the Free Software
-* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
diff --git a/crypto/cast6_generic.c b/crypto/cast6_generic.c
index 058c8d755d03..c8e5ec69790e 100644
--- a/crypto/cast6_generic.c
+++ b/crypto/cast6_generic.c
@@ -13,8 +13,7 @@
* any later version.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
diff --git a/crypto/chacha20_generic.c b/crypto/chacha20_generic.c
index 4a45fa4890c0..e451c3cb6a56 100644
--- a/crypto/chacha20_generic.c
+++ b/crypto/chacha20_generic.c
@@ -9,44 +9,38 @@
* (at your option) any later version.
*/
+#include <asm/unaligned.h>
#include <crypto/algapi.h>
#include <crypto/chacha20.h>
#include <crypto/internal/skcipher.h>
#include <linux/module.h>
-static inline u32 le32_to_cpuvp(const void *p)
-{
- return le32_to_cpup(p);
-}
-
static void chacha20_docrypt(u32 *state, u8 *dst, const u8 *src,
unsigned int bytes)
{
- u8 stream[CHACHA20_BLOCK_SIZE];
+ u32 stream[CHACHA20_BLOCK_WORDS];
if (dst != src)
memcpy(dst, src, bytes);
while (bytes >= CHACHA20_BLOCK_SIZE) {
chacha20_block(state, stream);
- crypto_xor(dst, stream, CHACHA20_BLOCK_SIZE);
+ crypto_xor(dst, (const u8 *)stream, CHACHA20_BLOCK_SIZE);
bytes -= CHACHA20_BLOCK_SIZE;
dst += CHACHA20_BLOCK_SIZE;
}
if (bytes) {
chacha20_block(state, stream);
- crypto_xor(dst, stream, bytes);
+ crypto_xor(dst, (const u8 *)stream, bytes);
}
}
void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv)
{
- static const char constant[16] = "expand 32-byte k";
-
- state[0] = le32_to_cpuvp(constant + 0);
- state[1] = le32_to_cpuvp(constant + 4);
- state[2] = le32_to_cpuvp(constant + 8);
- state[3] = le32_to_cpuvp(constant + 12);
+ state[0] = 0x61707865; /* "expa" */
+ state[1] = 0x3320646e; /* "nd 3" */
+ state[2] = 0x79622d32; /* "2-by" */
+ state[3] = 0x6b206574; /* "te k" */
state[4] = ctx->key[0];
state[5] = ctx->key[1];
state[6] = ctx->key[2];
@@ -55,10 +49,10 @@ void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv)
state[9] = ctx->key[5];
state[10] = ctx->key[6];
state[11] = ctx->key[7];
- state[12] = le32_to_cpuvp(iv + 0);
- state[13] = le32_to_cpuvp(iv + 4);
- state[14] = le32_to_cpuvp(iv + 8);
- state[15] = le32_to_cpuvp(iv + 12);
+ state[12] = get_unaligned_le32(iv + 0);
+ state[13] = get_unaligned_le32(iv + 4);
+ state[14] = get_unaligned_le32(iv + 8);
+ state[15] = get_unaligned_le32(iv + 12);
}
EXPORT_SYMBOL_GPL(crypto_chacha20_init);
@@ -72,7 +66,7 @@ int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(ctx->key); i++)
- ctx->key[i] = le32_to_cpuvp(key + i * sizeof(u32));
+ ctx->key[i] = get_unaligned_le32(key + i * sizeof(u32));
return 0;
}
@@ -111,7 +105,6 @@ static struct skcipher_alg alg = {
.base.cra_priority = 100,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct chacha20_ctx),
- .base.cra_alignmask = sizeof(u32) - 1,
.base.cra_module = THIS_MODULE,
.min_keysize = CHACHA20_KEY_SIZE,
diff --git a/crypto/crc32_generic.c b/crypto/crc32_generic.c
index aa2a25fc7482..718cbce8d169 100644
--- a/crypto/crc32_generic.c
+++ b/crypto/crc32_generic.c
@@ -133,6 +133,7 @@ static struct shash_alg alg = {
.cra_name = "crc32",
.cra_driver_name = "crc32-generic",
.cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_ctxsize = sizeof(u32),
.cra_module = THIS_MODULE,
diff --git a/crypto/crc32c_generic.c b/crypto/crc32c_generic.c
index 4c0a0e271876..372320399622 100644
--- a/crypto/crc32c_generic.c
+++ b/crypto/crc32c_generic.c
@@ -146,6 +146,7 @@ static struct shash_alg alg = {
.cra_name = "crc32c",
.cra_driver_name = "crc32c-generic",
.cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_alignmask = 3,
.cra_ctxsize = sizeof(struct chksum_ctx),
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index bd43cf5be14c..addca7bae33f 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -32,7 +32,9 @@
#include <linux/sched.h>
#include <linux/slab.h>
-#define CRYPTD_MAX_CPU_QLEN 1000
+static unsigned int cryptd_max_cpu_qlen = 1000;
+module_param(cryptd_max_cpu_qlen, uint, 0);
+MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");
struct cryptd_cpu_queue {
struct crypto_queue queue;
@@ -116,6 +118,7 @@ static int cryptd_init_queue(struct cryptd_queue *queue,
crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
}
+ pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
return 0;
}
@@ -893,10 +896,9 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
if (err)
goto out_free_inst;
- type = CRYPTO_ALG_ASYNC;
- if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
- type |= CRYPTO_ALG_INTERNAL;
- inst->alg.halg.base.cra_flags = type;
+ inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
+ (alg->cra_flags & (CRYPTO_ALG_INTERNAL |
+ CRYPTO_ALG_OPTIONAL_KEY));
inst->alg.halg.digestsize = salg->digestsize;
inst->alg.halg.statesize = salg->statesize;
@@ -911,7 +913,8 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
inst->alg.finup = cryptd_hash_finup_enqueue;
inst->alg.export = cryptd_hash_export;
inst->alg.import = cryptd_hash_import;
- inst->alg.setkey = cryptd_hash_setkey;
+ if (crypto_shash_alg_has_setkey(salg))
+ inst->alg.setkey = cryptd_hash_setkey;
inst->alg.digest = cryptd_hash_digest_enqueue;
err = ahash_register_instance(tmpl, inst);
@@ -1372,7 +1375,7 @@ static int __init cryptd_init(void)
{
int err;
- err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
+ err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
if (err)
return err;
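A note on the cryptd change above: because the parameter is registered with permissions 0, it gets no sysfs entry and cannot be changed at runtime. It can only be set at module load time, e.g. with the hypothetical invocation "modprobe cryptd cryptd_max_cpu_qlen=2000", or as "cryptd.cryptd_max_cpu_qlen=2000" on the kernel command line when cryptd is built in. The pr_info() added to cryptd_init_queue() reports the value that took effect.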
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index 0dbe2be7f783..5c291eedaa70 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -169,7 +169,7 @@ static int crypto_report_one(struct crypto_alg *alg,
ualg->cru_type = 0;
ualg->cru_mask = 0;
ualg->cru_flags = alg->cra_flags;
- ualg->cru_refcnt = atomic_read(&alg->cra_refcnt);
+ ualg->cru_refcnt = refcount_read(&alg->cra_refcnt);
if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority))
goto nla_put_failure;
@@ -387,7 +387,7 @@ static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
goto drop_alg;
err = -EBUSY;
- if (atomic_read(&alg->cra_refcnt) > 2)
+ if (refcount_read(&alg->cra_refcnt) > 2)
goto drop_alg;
err = crypto_unregister_instance((struct crypto_instance *)alg);
diff --git a/crypto/ecc.c b/crypto/ecc.c
index 633a9bcdc574..18f32f2a5e1c 100644
--- a/crypto/ecc.c
+++ b/crypto/ecc.c
@@ -964,7 +964,7 @@ int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits, u64 *privkey)
* DRBG with a security strength of 256.
*/
if (crypto_get_default_rng())
- err = -EFAULT;
+ return -EFAULT;
err = crypto_rng_get_bytes(crypto_default_rng, (u8 *)priv, nbytes);
crypto_put_default_rng();
diff --git a/crypto/echainiv.c b/crypto/echainiv.c
index e3d889b122e0..45819e6015bf 100644
--- a/crypto/echainiv.c
+++ b/crypto/echainiv.c
@@ -118,8 +118,6 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
struct rtattr **tb)
{
struct aead_instance *inst;
- struct crypto_aead_spawn *spawn;
- struct aead_alg *alg;
int err;
inst = aead_geniv_alloc(tmpl, tb, 0, 0);
@@ -127,9 +125,6 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
if (IS_ERR(inst))
return PTR_ERR(inst);
- spawn = aead_instance_ctx(inst);
- alg = crypto_spawn_aead_alg(spawn);
-
err = -EINVAL;
if (inst->alg.ivsize & (sizeof(u64) - 1) || !inst->alg.ivsize)
goto free_inst;
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 8589681fb9f6..0ad879e1f9b2 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -1101,7 +1101,7 @@ static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm)
if (IS_ERR(aead))
return PTR_ERR(aead);
- null = crypto_get_default_null_skcipher2();
+ null = crypto_get_default_null_skcipher();
err = PTR_ERR(null);
if (IS_ERR(null))
goto err_free_aead;
@@ -1129,7 +1129,7 @@ static void crypto_rfc4543_exit_tfm(struct crypto_aead *tfm)
struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm);
crypto_free_aead(ctx->child);
- crypto_put_default_null_skcipher2();
+ crypto_put_default_null_skcipher();
}
static void crypto_rfc4543_free(struct aead_instance *inst)
diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c
index 24e601954c7a..a4b1c026aaee 100644
--- a/crypto/gf128mul.c
+++ b/crypto/gf128mul.c
@@ -160,8 +160,6 @@ void gf128mul_x8_ble(le128 *r, const le128 *x)
{
u64 a = le64_to_cpu(x->a);
u64 b = le64_to_cpu(x->b);
-
- /* equivalent to gf128mul_table_be[b >> 63] (see crypto/gf128mul.c): */
u64 _tt = gf128mul_table_be[a >> 56];
r->a = cpu_to_le64((a << 8) | (b >> 56));
diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
index 12ad3e3a84e3..1bffb3f712dd 100644
--- a/crypto/ghash-generic.c
+++ b/crypto/ghash-generic.c
@@ -56,9 +56,6 @@ static int ghash_update(struct shash_desc *desc,
struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
u8 *dst = dctx->buffer;
- if (!ctx->gf128)
- return -ENOKEY;
-
if (dctx->bytes) {
int n = min(srclen, dctx->bytes);
u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes);
@@ -111,9 +108,6 @@ static int ghash_final(struct shash_desc *desc, u8 *dst)
struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
u8 *buf = dctx->buffer;
- if (!ctx->gf128)
- return -ENOKEY;
-
ghash_flush(ctx, dctx);
memcpy(dst, buf, GHASH_BLOCK_SIZE);
diff --git a/crypto/internal.h b/crypto/internal.h
index f07320423191..5ac27fba10e8 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -30,9 +30,6 @@
enum {
CRYPTO_MSG_ALG_REQUEST,
CRYPTO_MSG_ALG_REGISTER,
- CRYPTO_MSG_ALG_UNREGISTER,
- CRYPTO_MSG_TMPL_REGISTER,
- CRYPTO_MSG_TMPL_UNREGISTER,
};
struct crypto_instance;
@@ -78,7 +75,6 @@ int crypto_init_compress_ops(struct crypto_tfm *tfm);
struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask);
void crypto_larval_kill(struct crypto_alg *alg);
-struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask);
void crypto_alg_tested(const char *name, int err);
void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list,
@@ -106,13 +102,13 @@ int crypto_type_has_alg(const char *name, const struct crypto_type *frontend,
static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg)
{
- atomic_inc(&alg->cra_refcnt);
+ refcount_inc(&alg->cra_refcnt);
return alg;
}
static inline void crypto_alg_put(struct crypto_alg *alg)
{
- if (atomic_dec_and_test(&alg->cra_refcnt) && alg->cra_destroy)
+ if (refcount_dec_and_test(&alg->cra_refcnt) && alg->cra_destroy)
alg->cra_destroy(alg);
}
diff --git a/crypto/keywrap.c b/crypto/keywrap.c
index 744e35134c45..ec5c6a087c90 100644
--- a/crypto/keywrap.c
+++ b/crypto/keywrap.c
@@ -188,7 +188,7 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc,
}
/* Perform authentication check */
- if (block.A != cpu_to_be64(0xa6a6a6a6a6a6a6a6))
+ if (block.A != cpu_to_be64(0xa6a6a6a6a6a6a6a6ULL))
ret = -EBADMSG;
memzero_explicit(&block, sizeof(struct crypto_kw_block));
@@ -221,7 +221,7 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc,
* Place the predefined IV into block A -- for encrypt, the caller
* does not need to provide an IV, but he needs to fetch the final IV.
*/
- block.A = cpu_to_be64(0xa6a6a6a6a6a6a6a6);
+ block.A = cpu_to_be64(0xa6a6a6a6a6a6a6a6ULL);
/*
* src scatterlist is read-only. dst scatterlist is r/w. During the
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
index eca04d3729b3..fe5129d6ff4e 100644
--- a/crypto/mcryptd.c
+++ b/crypto/mcryptd.c
@@ -26,7 +26,6 @@
#include <linux/sched.h>
#include <linux/sched/stat.h>
#include <linux/slab.h>
-#include <linux/hardirq.h>
#define MCRYPTD_MAX_CPU_QLEN 100
#define MCRYPTD_BATCH 9
@@ -517,10 +516,9 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
if (err)
goto out_free_inst;
- type = CRYPTO_ALG_ASYNC;
- if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
- type |= CRYPTO_ALG_INTERNAL;
- inst->alg.halg.base.cra_flags = type;
+ inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
+ (alg->cra_flags & (CRYPTO_ALG_INTERNAL |
+ CRYPTO_ALG_OPTIONAL_KEY));
inst->alg.halg.digestsize = halg->digestsize;
inst->alg.halg.statesize = halg->statesize;
@@ -535,7 +533,8 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
inst->alg.finup = mcryptd_hash_finup_enqueue;
inst->alg.export = mcryptd_hash_export;
inst->alg.import = mcryptd_hash_import;
- inst->alg.setkey = mcryptd_hash_setkey;
+ if (crypto_hash_alg_has_setkey(halg))
+ inst->alg.setkey = mcryptd_hash_setkey;
inst->alg.digest = mcryptd_hash_digest_enqueue;
err = ahash_register_instance(tmpl, inst);
diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c
index b1c2d57dc734..b7a3a0613a30 100644
--- a/crypto/poly1305_generic.c
+++ b/crypto/poly1305_generic.c
@@ -47,17 +47,6 @@ int crypto_poly1305_init(struct shash_desc *desc)
}
EXPORT_SYMBOL_GPL(crypto_poly1305_init);
-int crypto_poly1305_setkey(struct crypto_shash *tfm,
- const u8 *key, unsigned int keylen)
-{
- /* Poly1305 requires a unique key for each tag, which implies that
- * we can't set it on the tfm that gets accessed by multiple users
- * simultaneously. Instead we expect the key as the first 32 bytes in
- * the update() call. */
- return -ENOTSUPP;
-}
-EXPORT_SYMBOL_GPL(crypto_poly1305_setkey);
-
static void poly1305_setrkey(struct poly1305_desc_ctx *dctx, const u8 *key)
{
/* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
@@ -76,6 +65,11 @@ static void poly1305_setskey(struct poly1305_desc_ctx *dctx, const u8 *key)
dctx->s[3] = get_unaligned_le32(key + 12);
}
+/*
+ * Poly1305 requires a unique key for each tag, which implies that we can't set
+ * it on the tfm that gets accessed by multiple users simultaneously. Instead we
+ * expect the key as the first 32 bytes in the update() call.
+ */
unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
const u8 *src, unsigned int srclen)
{
@@ -210,7 +204,6 @@ EXPORT_SYMBOL_GPL(crypto_poly1305_update);
int crypto_poly1305_final(struct shash_desc *desc, u8 *dst)
{
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
- __le32 *mac = (__le32 *)dst;
u32 h0, h1, h2, h3, h4;
u32 g0, g1, g2, g3, g4;
u32 mask;
@@ -267,10 +260,10 @@ int crypto_poly1305_final(struct shash_desc *desc, u8 *dst)
h3 = (h3 >> 18) | (h4 << 8);
/* mac = (h + s) % (2^128) */
- f = (f >> 32) + h0 + dctx->s[0]; mac[0] = cpu_to_le32(f);
- f = (f >> 32) + h1 + dctx->s[1]; mac[1] = cpu_to_le32(f);
- f = (f >> 32) + h2 + dctx->s[2]; mac[2] = cpu_to_le32(f);
- f = (f >> 32) + h3 + dctx->s[3]; mac[3] = cpu_to_le32(f);
+ f = (f >> 32) + h0 + dctx->s[0]; put_unaligned_le32(f, dst + 0);
+ f = (f >> 32) + h1 + dctx->s[1]; put_unaligned_le32(f, dst + 4);
+ f = (f >> 32) + h2 + dctx->s[2]; put_unaligned_le32(f, dst + 8);
+ f = (f >> 32) + h3 + dctx->s[3]; put_unaligned_le32(f, dst + 12);
return 0;
}
@@ -281,14 +274,12 @@ static struct shash_alg poly1305_alg = {
.init = crypto_poly1305_init,
.update = crypto_poly1305_update,
.final = crypto_poly1305_final,
- .setkey = crypto_poly1305_setkey,
.descsize = sizeof(struct poly1305_desc_ctx),
.base = {
.cra_name = "poly1305",
.cra_driver_name = "poly1305-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
- .cra_alignmask = sizeof(u32) - 1,
.cra_blocksize = POLY1305_BLOCK_SIZE,
.cra_module = THIS_MODULE,
},
diff --git a/crypto/proc.c b/crypto/proc.c
index 2cc10c96d753..822fcef6d91c 100644
--- a/crypto/proc.c
+++ b/crypto/proc.c
@@ -46,7 +46,7 @@ static int c_show(struct seq_file *m, void *p)
seq_printf(m, "driver : %s\n", alg->cra_driver_name);
seq_printf(m, "module : %s\n", module_name(alg->cra_module));
seq_printf(m, "priority : %d\n", alg->cra_priority);
- seq_printf(m, "refcnt : %d\n", atomic_read(&alg->cra_refcnt));
+ seq_printf(m, "refcnt : %u\n", refcount_read(&alg->cra_refcnt));
seq_printf(m, "selftest : %s\n",
(alg->cra_flags & CRYPTO_ALG_TESTED) ?
"passed" : "unknown");
diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c
index d7da0eea5622..5074006a56c3 100644
--- a/crypto/salsa20_generic.c
+++ b/crypto/salsa20_generic.c
@@ -19,49 +19,19 @@
*
*/
-#include <linux/init.h>
+#include <asm/unaligned.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/salsa20.h>
#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/crypto.h>
-#include <linux/types.h>
-#include <linux/bitops.h>
-#include <crypto/algapi.h>
-#include <asm/byteorder.h>
-#define SALSA20_IV_SIZE 8U
-#define SALSA20_MIN_KEY_SIZE 16U
-#define SALSA20_MAX_KEY_SIZE 32U
-
-/*
- * Start of code taken from D. J. Bernstein's reference implementation.
- * With some modifications and optimizations made to suit our needs.
- */
-
-/*
-salsa20-ref.c version 20051118
-D. J. Bernstein
-Public domain.
-*/
-
-#define U32TO8_LITTLE(p, v) \
- { (p)[0] = (v >> 0) & 0xff; (p)[1] = (v >> 8) & 0xff; \
- (p)[2] = (v >> 16) & 0xff; (p)[3] = (v >> 24) & 0xff; }
-#define U8TO32_LITTLE(p) \
- (((u32)((p)[0]) ) | ((u32)((p)[1]) << 8) | \
- ((u32)((p)[2]) << 16) | ((u32)((p)[3]) << 24) )
-
-struct salsa20_ctx
-{
- u32 input[16];
-};
-
-static void salsa20_wordtobyte(u8 output[64], const u32 input[16])
+static void salsa20_block(u32 *state, __le32 *stream)
{
u32 x[16];
int i;
- memcpy(x, input, sizeof(x));
- for (i = 20; i > 0; i -= 2) {
+ memcpy(x, state, sizeof(x));
+
+ for (i = 0; i < 20; i += 2) {
x[ 4] ^= rol32((x[ 0] + x[12]), 7);
x[ 8] ^= rol32((x[ 4] + x[ 0]), 9);
x[12] ^= rol32((x[ 8] + x[ 4]), 13);
@@ -95,145 +65,137 @@ static void salsa20_wordtobyte(u8 output[64], const u32 input[16])
x[14] ^= rol32((x[13] + x[12]), 13);
x[15] ^= rol32((x[14] + x[13]), 18);
}
- for (i = 0; i < 16; ++i)
- x[i] += input[i];
- for (i = 0; i < 16; ++i)
- U32TO8_LITTLE(output + 4 * i,x[i]);
-}
-static const char sigma[16] = "expand 32-byte k";
-static const char tau[16] = "expand 16-byte k";
+ for (i = 0; i < 16; i++)
+ stream[i] = cpu_to_le32(x[i] + state[i]);
+
+ if (++state[8] == 0)
+ state[9]++;
+}
-static void salsa20_keysetup(struct salsa20_ctx *ctx, const u8 *k, u32 kbytes)
+static void salsa20_docrypt(u32 *state, u8 *dst, const u8 *src,
+ unsigned int bytes)
{
- const char *constants;
+ __le32 stream[SALSA20_BLOCK_SIZE / sizeof(__le32)];
- ctx->input[1] = U8TO32_LITTLE(k + 0);
- ctx->input[2] = U8TO32_LITTLE(k + 4);
- ctx->input[3] = U8TO32_LITTLE(k + 8);
- ctx->input[4] = U8TO32_LITTLE(k + 12);
- if (kbytes == 32) { /* recommended */
- k += 16;
- constants = sigma;
- } else { /* kbytes == 16 */
- constants = tau;
+ if (dst != src)
+ memcpy(dst, src, bytes);
+
+ while (bytes >= SALSA20_BLOCK_SIZE) {
+ salsa20_block(state, stream);
+ crypto_xor(dst, (const u8 *)stream, SALSA20_BLOCK_SIZE);
+ bytes -= SALSA20_BLOCK_SIZE;
+ dst += SALSA20_BLOCK_SIZE;
+ }
+ if (bytes) {
+ salsa20_block(state, stream);
+ crypto_xor(dst, (const u8 *)stream, bytes);
}
- ctx->input[11] = U8TO32_LITTLE(k + 0);
- ctx->input[12] = U8TO32_LITTLE(k + 4);
- ctx->input[13] = U8TO32_LITTLE(k + 8);
- ctx->input[14] = U8TO32_LITTLE(k + 12);
- ctx->input[0] = U8TO32_LITTLE(constants + 0);
- ctx->input[5] = U8TO32_LITTLE(constants + 4);
- ctx->input[10] = U8TO32_LITTLE(constants + 8);
- ctx->input[15] = U8TO32_LITTLE(constants + 12);
}
-static void salsa20_ivsetup(struct salsa20_ctx *ctx, const u8 *iv)
+void crypto_salsa20_init(u32 *state, const struct salsa20_ctx *ctx,
+ const u8 *iv)
{
- ctx->input[6] = U8TO32_LITTLE(iv + 0);
- ctx->input[7] = U8TO32_LITTLE(iv + 4);
- ctx->input[8] = 0;
- ctx->input[9] = 0;
+ memcpy(state, ctx->initial_state, sizeof(ctx->initial_state));
+ state[6] = get_unaligned_le32(iv + 0);
+ state[7] = get_unaligned_le32(iv + 4);
}
+EXPORT_SYMBOL_GPL(crypto_salsa20_init);
-static void salsa20_encrypt_bytes(struct salsa20_ctx *ctx, u8 *dst,
- const u8 *src, unsigned int bytes)
+int crypto_salsa20_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keysize)
{
- u8 buf[64];
-
- if (dst != src)
- memcpy(dst, src, bytes);
-
- while (bytes) {
- salsa20_wordtobyte(buf, ctx->input);
-
- ctx->input[8]++;
- if (!ctx->input[8])
- ctx->input[9]++;
+ static const char sigma[16] = "expand 32-byte k";
+ static const char tau[16] = "expand 16-byte k";
+ struct salsa20_ctx *ctx = crypto_skcipher_ctx(tfm);
+ const char *constants;
- if (bytes <= 64) {
- crypto_xor(dst, buf, bytes);
- return;
- }
+ if (keysize != SALSA20_MIN_KEY_SIZE &&
+ keysize != SALSA20_MAX_KEY_SIZE)
+ return -EINVAL;
- crypto_xor(dst, buf, 64);
- bytes -= 64;
- dst += 64;
+ ctx->initial_state[1] = get_unaligned_le32(key + 0);
+ ctx->initial_state[2] = get_unaligned_le32(key + 4);
+ ctx->initial_state[3] = get_unaligned_le32(key + 8);
+ ctx->initial_state[4] = get_unaligned_le32(key + 12);
+ if (keysize == 32) { /* recommended */
+ key += 16;
+ constants = sigma;
+ } else { /* keysize == 16 */
+ constants = tau;
}
-}
-
-/*
- * End of code taken from D. J. Bernstein's reference implementation.
- */
+ ctx->initial_state[11] = get_unaligned_le32(key + 0);
+ ctx->initial_state[12] = get_unaligned_le32(key + 4);
+ ctx->initial_state[13] = get_unaligned_le32(key + 8);
+ ctx->initial_state[14] = get_unaligned_le32(key + 12);
+ ctx->initial_state[0] = get_unaligned_le32(constants + 0);
+ ctx->initial_state[5] = get_unaligned_le32(constants + 4);
+ ctx->initial_state[10] = get_unaligned_le32(constants + 8);
+ ctx->initial_state[15] = get_unaligned_le32(constants + 12);
+
+ /* space for the nonce; it will be overridden for each request */
+ ctx->initial_state[6] = 0;
+ ctx->initial_state[7] = 0;
+
+ /* initial block number */
+ ctx->initial_state[8] = 0;
+ ctx->initial_state[9] = 0;
-static int setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keysize)
-{
- struct salsa20_ctx *ctx = crypto_tfm_ctx(tfm);
- salsa20_keysetup(ctx, key, keysize);
return 0;
}
+EXPORT_SYMBOL_GPL(crypto_salsa20_setkey);
-static int encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int salsa20_crypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
- struct crypto_blkcipher *tfm = desc->tfm;
- struct salsa20_ctx *ctx = crypto_blkcipher_ctx(tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct salsa20_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ u32 state[16];
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt_block(desc, &walk, 64);
+ err = skcipher_walk_virt(&walk, req, true);
- salsa20_ivsetup(ctx, walk.iv);
+ crypto_salsa20_init(state, ctx, walk.iv);
- while (walk.nbytes >= 64) {
- salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
- walk.src.virt.addr,
- walk.nbytes - (walk.nbytes % 64));
- err = blkcipher_walk_done(desc, &walk, walk.nbytes % 64);
- }
+ while (walk.nbytes > 0) {
+ unsigned int nbytes = walk.nbytes;
- if (walk.nbytes) {
- salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
- walk.src.virt.addr, walk.nbytes);
- err = blkcipher_walk_done(desc, &walk, 0);
+ if (nbytes < walk.total)
+ nbytes = round_down(nbytes, walk.stride);
+
+ salsa20_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes);
+ err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
}
return err;
}
-static struct crypto_alg alg = {
- .cra_name = "salsa20",
- .cra_driver_name = "salsa20-generic",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_type = &crypto_blkcipher_type,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct salsa20_ctx),
- .cra_alignmask = 3,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .setkey = setkey,
- .encrypt = encrypt,
- .decrypt = encrypt,
- .min_keysize = SALSA20_MIN_KEY_SIZE,
- .max_keysize = SALSA20_MAX_KEY_SIZE,
- .ivsize = SALSA20_IV_SIZE,
- }
- }
+static struct skcipher_alg alg = {
+ .base.cra_name = "salsa20",
+ .base.cra_driver_name = "salsa20-generic",
+ .base.cra_priority = 100,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct salsa20_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = SALSA20_MIN_KEY_SIZE,
+ .max_keysize = SALSA20_MAX_KEY_SIZE,
+ .ivsize = SALSA20_IV_SIZE,
+ .chunksize = SALSA20_BLOCK_SIZE,
+ .setkey = crypto_salsa20_setkey,
+ .encrypt = salsa20_crypt,
+ .decrypt = salsa20_crypt,
};
static int __init salsa20_generic_mod_init(void)
{
- return crypto_register_alg(&alg);
+ return crypto_register_skcipher(&alg);
}
static void __exit salsa20_generic_mod_fini(void)
{
- crypto_unregister_alg(&alg);
+ crypto_unregister_skcipher(&alg);
}
module_init(salsa20_generic_mod_init);
diff --git a/crypto/seqiv.c b/crypto/seqiv.c
index 570b7d1aa0ca..39dbf2f7e5f5 100644
--- a/crypto/seqiv.c
+++ b/crypto/seqiv.c
@@ -144,8 +144,6 @@ static int seqiv_aead_decrypt(struct aead_request *req)
static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct aead_instance *inst;
- struct crypto_aead_spawn *spawn;
- struct aead_alg *alg;
int err;
inst = aead_geniv_alloc(tmpl, tb, 0, 0);
@@ -153,9 +151,6 @@ static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb)
if (IS_ERR(inst))
return PTR_ERR(inst);
- spawn = aead_instance_ctx(inst);
- alg = crypto_spawn_aead_alg(spawn);
-
err = -EINVAL;
if (inst->alg.ivsize != sizeof(u64))
goto free_inst;
diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c
index 7e8ed96236ce..a965b9d80559 100644
--- a/crypto/sha3_generic.c
+++ b/crypto/sha3_generic.c
@@ -5,6 +5,7 @@
* http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf
*
* SHA-3 code by Jeff Garzik <jeff@garzik.org>
+ * Ard Biesheuvel <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -17,12 +18,10 @@
#include <linux/module.h>
#include <linux/types.h>
#include <crypto/sha3.h>
-#include <asm/byteorder.h>
+#include <asm/unaligned.h>
#define KECCAK_ROUNDS 24
-#define ROTL64(x, y) (((x) << (y)) | ((x) >> (64 - (y))))
-
static const u64 keccakf_rndc[24] = {
0x0000000000000001ULL, 0x0000000000008082ULL, 0x800000000000808aULL,
0x8000000080008000ULL, 0x000000000000808bULL, 0x0000000080000001ULL,
@@ -34,100 +33,133 @@ static const u64 keccakf_rndc[24] = {
0x8000000000008080ULL, 0x0000000080000001ULL, 0x8000000080008008ULL
};
-static const int keccakf_rotc[24] = {
- 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14,
- 27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44
-};
-
-static const int keccakf_piln[24] = {
- 10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4,
- 15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1
-};
-
/* update the state with given number of rounds */
-static void keccakf(u64 st[25])
+static void __attribute__((__optimize__("O3"))) keccakf(u64 st[25])
{
- int i, j, round;
- u64 t, bc[5];
+ u64 t[5], tt, bc[5];
+ int round;
for (round = 0; round < KECCAK_ROUNDS; round++) {
/* Theta */
- for (i = 0; i < 5; i++)
- bc[i] = st[i] ^ st[i + 5] ^ st[i + 10] ^ st[i + 15]
- ^ st[i + 20];
-
- for (i = 0; i < 5; i++) {
- t = bc[(i + 4) % 5] ^ ROTL64(bc[(i + 1) % 5], 1);
- for (j = 0; j < 25; j += 5)
- st[j + i] ^= t;
- }
+ bc[0] = st[0] ^ st[5] ^ st[10] ^ st[15] ^ st[20];
+ bc[1] = st[1] ^ st[6] ^ st[11] ^ st[16] ^ st[21];
+ bc[2] = st[2] ^ st[7] ^ st[12] ^ st[17] ^ st[22];
+ bc[3] = st[3] ^ st[8] ^ st[13] ^ st[18] ^ st[23];
+ bc[4] = st[4] ^ st[9] ^ st[14] ^ st[19] ^ st[24];
+
+ t[0] = bc[4] ^ rol64(bc[1], 1);
+ t[1] = bc[0] ^ rol64(bc[2], 1);
+ t[2] = bc[1] ^ rol64(bc[3], 1);
+ t[3] = bc[2] ^ rol64(bc[4], 1);
+ t[4] = bc[3] ^ rol64(bc[0], 1);
+
+ st[0] ^= t[0];
/* Rho Pi */
- t = st[1];
- for (i = 0; i < 24; i++) {
- j = keccakf_piln[i];
- bc[0] = st[j];
- st[j] = ROTL64(t, keccakf_rotc[i]);
- t = bc[0];
- }
+ tt = st[1];
+ st[ 1] = rol64(st[ 6] ^ t[1], 44);
+ st[ 6] = rol64(st[ 9] ^ t[4], 20);
+ st[ 9] = rol64(st[22] ^ t[2], 61);
+ st[22] = rol64(st[14] ^ t[4], 39);
+ st[14] = rol64(st[20] ^ t[0], 18);
+ st[20] = rol64(st[ 2] ^ t[2], 62);
+ st[ 2] = rol64(st[12] ^ t[2], 43);
+ st[12] = rol64(st[13] ^ t[3], 25);
+ st[13] = rol64(st[19] ^ t[4], 8);
+ st[19] = rol64(st[23] ^ t[3], 56);
+ st[23] = rol64(st[15] ^ t[0], 41);
+ st[15] = rol64(st[ 4] ^ t[4], 27);
+ st[ 4] = rol64(st[24] ^ t[4], 14);
+ st[24] = rol64(st[21] ^ t[1], 2);
+ st[21] = rol64(st[ 8] ^ t[3], 55);
+ st[ 8] = rol64(st[16] ^ t[1], 45);
+ st[16] = rol64(st[ 5] ^ t[0], 36);
+ st[ 5] = rol64(st[ 3] ^ t[3], 28);
+ st[ 3] = rol64(st[18] ^ t[3], 21);
+ st[18] = rol64(st[17] ^ t[2], 15);
+ st[17] = rol64(st[11] ^ t[1], 10);
+ st[11] = rol64(st[ 7] ^ t[2], 6);
+ st[ 7] = rol64(st[10] ^ t[0], 3);
+ st[10] = rol64( tt ^ t[1], 1);
/* Chi */
- for (j = 0; j < 25; j += 5) {
- for (i = 0; i < 5; i++)
- bc[i] = st[j + i];
- for (i = 0; i < 5; i++)
- st[j + i] ^= (~bc[(i + 1) % 5]) &
- bc[(i + 2) % 5];
- }
+ bc[ 0] = ~st[ 1] & st[ 2];
+ bc[ 1] = ~st[ 2] & st[ 3];
+ bc[ 2] = ~st[ 3] & st[ 4];
+ bc[ 3] = ~st[ 4] & st[ 0];
+ bc[ 4] = ~st[ 0] & st[ 1];
+ st[ 0] ^= bc[ 0];
+ st[ 1] ^= bc[ 1];
+ st[ 2] ^= bc[ 2];
+ st[ 3] ^= bc[ 3];
+ st[ 4] ^= bc[ 4];
+
+ bc[ 0] = ~st[ 6] & st[ 7];
+ bc[ 1] = ~st[ 7] & st[ 8];
+ bc[ 2] = ~st[ 8] & st[ 9];
+ bc[ 3] = ~st[ 9] & st[ 5];
+ bc[ 4] = ~st[ 5] & st[ 6];
+ st[ 5] ^= bc[ 0];
+ st[ 6] ^= bc[ 1];
+ st[ 7] ^= bc[ 2];
+ st[ 8] ^= bc[ 3];
+ st[ 9] ^= bc[ 4];
+
+ bc[ 0] = ~st[11] & st[12];
+ bc[ 1] = ~st[12] & st[13];
+ bc[ 2] = ~st[13] & st[14];
+ bc[ 3] = ~st[14] & st[10];
+ bc[ 4] = ~st[10] & st[11];
+ st[10] ^= bc[ 0];
+ st[11] ^= bc[ 1];
+ st[12] ^= bc[ 2];
+ st[13] ^= bc[ 3];
+ st[14] ^= bc[ 4];
+
+ bc[ 0] = ~st[16] & st[17];
+ bc[ 1] = ~st[17] & st[18];
+ bc[ 2] = ~st[18] & st[19];
+ bc[ 3] = ~st[19] & st[15];
+ bc[ 4] = ~st[15] & st[16];
+ st[15] ^= bc[ 0];
+ st[16] ^= bc[ 1];
+ st[17] ^= bc[ 2];
+ st[18] ^= bc[ 3];
+ st[19] ^= bc[ 4];
+
+ bc[ 0] = ~st[21] & st[22];
+ bc[ 1] = ~st[22] & st[23];
+ bc[ 2] = ~st[23] & st[24];
+ bc[ 3] = ~st[24] & st[20];
+ bc[ 4] = ~st[20] & st[21];
+ st[20] ^= bc[ 0];
+ st[21] ^= bc[ 1];
+ st[22] ^= bc[ 2];
+ st[23] ^= bc[ 3];
+ st[24] ^= bc[ 4];
/* Iota */
st[0] ^= keccakf_rndc[round];
}
}
-static void sha3_init(struct sha3_state *sctx, unsigned int digest_sz)
-{
- memset(sctx, 0, sizeof(*sctx));
- sctx->md_len = digest_sz;
- sctx->rsiz = 200 - 2 * digest_sz;
- sctx->rsizw = sctx->rsiz / 8;
-}
-
-static int sha3_224_init(struct shash_desc *desc)
-{
- struct sha3_state *sctx = shash_desc_ctx(desc);
-
- sha3_init(sctx, SHA3_224_DIGEST_SIZE);
- return 0;
-}
-
-static int sha3_256_init(struct shash_desc *desc)
+int crypto_sha3_init(struct shash_desc *desc)
{
struct sha3_state *sctx = shash_desc_ctx(desc);
+ unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
- sha3_init(sctx, SHA3_256_DIGEST_SIZE);
- return 0;
-}
-
-static int sha3_384_init(struct shash_desc *desc)
-{
- struct sha3_state *sctx = shash_desc_ctx(desc);
-
- sha3_init(sctx, SHA3_384_DIGEST_SIZE);
- return 0;
-}
-
-static int sha3_512_init(struct shash_desc *desc)
-{
- struct sha3_state *sctx = shash_desc_ctx(desc);
+ sctx->rsiz = 200 - 2 * digest_size;
+ sctx->rsizw = sctx->rsiz / 8;
+ sctx->partial = 0;
- sha3_init(sctx, SHA3_512_DIGEST_SIZE);
+ memset(sctx->st, 0, sizeof(sctx->st));
return 0;
}
+EXPORT_SYMBOL(crypto_sha3_init);
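
The common crypto_sha3_init() derives everything from the digest size: rsiz = 200 - 2 * digest_size gives the sponge rate in bytes, so SHA3-224 (28-byte digest) absorbs 144 bytes per block, SHA3-256 136, SHA3-384 104 and SHA3-512 72 — matching the SHA3_*_BLOCK_SIZE values in the algorithm definitions below — and rsizw (18, 17, 13, 9) is the same rate in 64-bit words.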
-static int sha3_update(struct shash_desc *desc, const u8 *data,
+int crypto_sha3_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct sha3_state *sctx = shash_desc_ctx(desc);
@@ -149,7 +181,7 @@ static int sha3_update(struct shash_desc *desc, const u8 *data,
unsigned int i;
for (i = 0; i < sctx->rsizw; i++)
- sctx->st[i] ^= ((u64 *) src)[i];
+ sctx->st[i] ^= get_unaligned_le64(src + 8 * i);
keccakf(sctx->st);
done += sctx->rsiz;
@@ -163,125 +195,89 @@ static int sha3_update(struct shash_desc *desc, const u8 *data,
return 0;
}
+EXPORT_SYMBOL(crypto_sha3_update);
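
The get_unaligned_le64() conversion is what makes the generic code safe on strict-alignment and big-endian machines: the old (u64 *) cast assumed the input buffer was 8-byte aligned and already in CPU byte order. A user-space model of the accessor's semantics (an assumption for illustration, not the kernel helper itself):

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Model of get_unaligned_le64(): read 8 bytes at any alignment
	 * and interpret them as little-endian, whatever the host order.
	 */
	static uint64_t model_get_unaligned_le64(const void *p)
	{
		const uint8_t *b = p;
		uint64_t v = 0;
		int i;

		for (i = 7; i >= 0; i--)
			v = (v << 8) | b[i];
		return v;
	}

	int main(void)
	{
		uint8_t buf[9] = { 0, 1, 2, 3, 4, 5, 6, 7, 8 };

		/*
		 * buf + 1 is deliberately misaligned; a plain (u64 *)
		 * cast here would be undefined behaviour and can trap
		 * on strict-alignment architectures.
		 */
		printf("%016llx\n",
		       (unsigned long long)model_get_unaligned_le64(buf + 1));
		return 0;
	}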
-static int sha3_final(struct shash_desc *desc, u8 *out)
+int crypto_sha3_final(struct shash_desc *desc, u8 *out)
{
struct sha3_state *sctx = shash_desc_ctx(desc);
unsigned int i, inlen = sctx->partial;
+ unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
+ __le64 *digest = (__le64 *)out;
sctx->buf[inlen++] = 0x06;
memset(sctx->buf + inlen, 0, sctx->rsiz - inlen);
sctx->buf[sctx->rsiz - 1] |= 0x80;
for (i = 0; i < sctx->rsizw; i++)
- sctx->st[i] ^= ((u64 *) sctx->buf)[i];
+ sctx->st[i] ^= get_unaligned_le64(sctx->buf + 8 * i);
keccakf(sctx->st);
- for (i = 0; i < sctx->rsizw; i++)
- sctx->st[i] = cpu_to_le64(sctx->st[i]);
+ for (i = 0; i < digest_size / 8; i++)
+ put_unaligned_le64(sctx->st[i], digest++);
- memcpy(out, sctx->st, sctx->md_len);
+ if (digest_size & 4)
+ put_unaligned_le32(sctx->st[i], (__le32 *)digest);
memset(sctx, 0, sizeof(*sctx));
return 0;
}
-
-static struct shash_alg sha3_224 = {
- .digestsize = SHA3_224_DIGEST_SIZE,
- .init = sha3_224_init,
- .update = sha3_update,
- .final = sha3_final,
- .descsize = sizeof(struct sha3_state),
- .base = {
- .cra_name = "sha3-224",
- .cra_driver_name = "sha3-224-generic",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
- .cra_blocksize = SHA3_224_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-};
-
-static struct shash_alg sha3_256 = {
- .digestsize = SHA3_256_DIGEST_SIZE,
- .init = sha3_256_init,
- .update = sha3_update,
- .final = sha3_final,
- .descsize = sizeof(struct sha3_state),
- .base = {
- .cra_name = "sha3-256",
- .cra_driver_name = "sha3-256-generic",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
- .cra_blocksize = SHA3_256_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-};
-
-static struct shash_alg sha3_384 = {
- .digestsize = SHA3_384_DIGEST_SIZE,
- .init = sha3_384_init,
- .update = sha3_update,
- .final = sha3_final,
- .descsize = sizeof(struct sha3_state),
- .base = {
- .cra_name = "sha3-384",
- .cra_driver_name = "sha3-384-generic",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
- .cra_blocksize = SHA3_384_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-};
-
-static struct shash_alg sha3_512 = {
- .digestsize = SHA3_512_DIGEST_SIZE,
- .init = sha3_512_init,
- .update = sha3_update,
- .final = sha3_final,
- .descsize = sizeof(struct sha3_state),
- .base = {
- .cra_name = "sha3-512",
- .cra_driver_name = "sha3-512-generic",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
- .cra_blocksize = SHA3_512_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-};
+EXPORT_SYMBOL(crypto_sha3_final);
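
The two magic bytes in crypto_sha3_final() encode the FIPS 202 padding: 0x06 is the SHA-3 domain-separation suffix (bits 01) followed by the first 1 bit of the pad10*1 rule, and the 0x80 OR-ed into the last rate byte is the closing 1 bit. Worked example for SHA3-256 (rsiz = 136): an empty message pads to buf[0] = 0x06, buf[1..134] = 0x00, buf[135] = 0x80, while a 135-byte final block collapses both markers into one byte, buf[135] = 0x06 | 0x80 = 0x86, which the |= above handles.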
+
+static struct shash_alg algs[] = { {
+ .digestsize = SHA3_224_DIGEST_SIZE,
+ .init = crypto_sha3_init,
+ .update = crypto_sha3_update,
+ .final = crypto_sha3_final,
+ .descsize = sizeof(struct sha3_state),
+ .base.cra_name = "sha3-224",
+ .base.cra_driver_name = "sha3-224-generic",
+ .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .base.cra_blocksize = SHA3_224_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+}, {
+ .digestsize = SHA3_256_DIGEST_SIZE,
+ .init = crypto_sha3_init,
+ .update = crypto_sha3_update,
+ .final = crypto_sha3_final,
+ .descsize = sizeof(struct sha3_state),
+ .base.cra_name = "sha3-256",
+ .base.cra_driver_name = "sha3-256-generic",
+ .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .base.cra_blocksize = SHA3_256_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+}, {
+ .digestsize = SHA3_384_DIGEST_SIZE,
+ .init = crypto_sha3_init,
+ .update = crypto_sha3_update,
+ .final = crypto_sha3_final,
+ .descsize = sizeof(struct sha3_state),
+ .base.cra_name = "sha3-384",
+ .base.cra_driver_name = "sha3-384-generic",
+ .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .base.cra_blocksize = SHA3_384_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+}, {
+ .digestsize = SHA3_512_DIGEST_SIZE,
+ .init = crypto_sha3_init,
+ .update = crypto_sha3_update,
+ .final = crypto_sha3_final,
+ .descsize = sizeof(struct sha3_state),
+ .base.cra_name = "sha3-512",
+ .base.cra_driver_name = "sha3-512-generic",
+ .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .base.cra_blocksize = SHA3_512_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+} };
static int __init sha3_generic_mod_init(void)
{
- int ret;
-
- ret = crypto_register_shash(&sha3_224);
- if (ret < 0)
- goto err_out;
- ret = crypto_register_shash(&sha3_256);
- if (ret < 0)
- goto err_out_224;
- ret = crypto_register_shash(&sha3_384);
- if (ret < 0)
- goto err_out_256;
- ret = crypto_register_shash(&sha3_512);
- if (ret < 0)
- goto err_out_384;
-
- return 0;
-
-err_out_384:
- crypto_unregister_shash(&sha3_384);
-err_out_256:
- crypto_unregister_shash(&sha3_256);
-err_out_224:
- crypto_unregister_shash(&sha3_224);
-err_out:
- return ret;
+ return crypto_register_shashes(algs, ARRAY_SIZE(algs));
}
static void __exit sha3_generic_mod_fini(void)
{
- crypto_unregister_shash(&sha3_224);
- crypto_unregister_shash(&sha3_256);
- crypto_unregister_shash(&sha3_384);
- crypto_unregister_shash(&sha3_512);
+ crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
}
module_init(sha3_generic_mod_init);
diff --git a/crypto/shash.c b/crypto/shash.c
index e849d3ee2e27..5d732c6bb4b2 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -58,11 +58,18 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
{
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned long alignmask = crypto_shash_alignmask(tfm);
+ int err;
if ((unsigned long)key & alignmask)
- return shash_setkey_unaligned(tfm, key, keylen);
+ err = shash_setkey_unaligned(tfm, key, keylen);
+ else
+ err = shash->setkey(tfm, key, keylen);
+
+ if (err)
+ return err;
- return shash->setkey(tfm, key, keylen);
+ crypto_shash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
+ return 0;
}
EXPORT_SYMBOL_GPL(crypto_shash_setkey);
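
Taken together with the init_tfm change below, the effect is that a transform whose algorithm requires a key starts life with CRYPTO_TFM_NEED_KEY set, only a successful setkey() clears it, and crypto_shash_digest() refuses unkeyed use with -ENOKEY. A toy user-space model of that state machine (hypothetical names, not the kernel API):

	#include <stdio.h>

	enum { NEED_KEY = 1 };

	struct tfm { unsigned int flags; };

	static int setkey(struct tfm *t, int key_ok)
	{
		if (!key_ok)
			return -1;		/* flag stays set on failure */
		t->flags &= ~NEED_KEY;		/* cleared only on success */
		return 0;
	}

	static int digest(struct tfm *t)
	{
		if (t->flags & NEED_KEY)
			return -126;		/* -ENOKEY on most architectures */
		return 0;
	}

	int main(void)
	{
		struct tfm t = { .flags = NEED_KEY };

		printf("before setkey: %d\n", digest(&t));	/* -126 */
		setkey(&t, 0);
		printf("after failed setkey: %d\n", digest(&t));	/* -126 */
		setkey(&t, 1);
		printf("after good setkey: %d\n", digest(&t));	/* 0 */
		return 0;
	}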
@@ -181,6 +188,9 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned long alignmask = crypto_shash_alignmask(tfm);
+ if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
if (((unsigned long)data | (unsigned long)out) & alignmask)
return shash_digest_unaligned(desc, data, len, out);
@@ -360,7 +370,8 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
crt->digest = shash_async_digest;
crt->setkey = shash_async_setkey;
- crt->has_setkey = alg->setkey != shash_no_setkey;
+ crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) &
+ CRYPTO_TFM_NEED_KEY);
if (alg->export)
crt->export = shash_async_export;
@@ -375,8 +386,14 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
static int crypto_shash_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_shash *hash = __crypto_shash_cast(tfm);
+ struct shash_alg *alg = crypto_shash_alg(hash);
+
+ hash->descsize = alg->descsize;
+
+ if (crypto_shash_alg_has_setkey(alg) &&
+ !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
+ crypto_shash_set_flags(hash, CRYPTO_TFM_NEED_KEY);
- hash->descsize = crypto_shash_alg(hash)->descsize;
return 0;
}
diff --git a/crypto/simd.c b/crypto/simd.c
index 88203370a62f..208226d7f908 100644
--- a/crypto/simd.c
+++ b/crypto/simd.c
@@ -19,9 +19,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 11af5fd6a443..0fe2a2923ad0 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -598,8 +598,11 @@ static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
err = crypto_blkcipher_setkey(blkcipher, key, keylen);
crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
CRYPTO_TFM_RES_MASK);
+ if (err)
+ return err;
- return err;
+ crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
+ return 0;
}
static int skcipher_crypt_blkcipher(struct skcipher_request *req,
@@ -674,6 +677,9 @@ static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
skcipher->keysize = calg->cra_blkcipher.max_keysize;
+ if (skcipher->keysize)
+ crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);
+
return 0;
}
@@ -692,8 +698,11 @@ static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm,
crypto_skcipher_set_flags(tfm,
crypto_ablkcipher_get_flags(ablkcipher) &
CRYPTO_TFM_RES_MASK);
+ if (err)
+ return err;
- return err;
+ crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
+ return 0;
}
static int skcipher_crypt_ablkcipher(struct skcipher_request *req,
@@ -767,6 +776,9 @@ static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
sizeof(struct ablkcipher_request);
skcipher->keysize = calg->cra_ablkcipher.max_keysize;
+ if (skcipher->keysize)
+ crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);
+
return 0;
}
@@ -796,6 +808,7 @@ static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
{
struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
unsigned long alignmask = crypto_skcipher_alignmask(tfm);
+ int err;
if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
@@ -803,9 +816,15 @@ static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
}
if ((unsigned long)key & alignmask)
- return skcipher_setkey_unaligned(tfm, key, keylen);
+ err = skcipher_setkey_unaligned(tfm, key, keylen);
+ else
+ err = cipher->setkey(tfm, key, keylen);
+
+ if (err)
+ return err;
- return cipher->setkey(tfm, key, keylen);
+ crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
+ return 0;
}
static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
@@ -834,6 +853,9 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
skcipher->ivsize = alg->ivsize;
skcipher->keysize = alg->max_keysize;
+ if (skcipher->keysize)
+ crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);
+
if (alg->exit)
skcipher->base.exit = crypto_skcipher_exit_tfm;
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 9267cbdb14d2..14213a096fd2 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -67,6 +67,7 @@ static char *alg = NULL;
static u32 type;
static u32 mask;
static int mode;
+static u32 num_mb = 8;
static char *tvmem[TVMEMSIZE];
static char *check[] = {
@@ -79,6 +80,66 @@ static char *check[] = {
NULL
};
+static u32 block_sizes[] = { 16, 64, 256, 1024, 8192, 0 };
+static u32 aead_sizes[] = { 16, 64, 256, 512, 1024, 2048, 4096, 8192, 0 };
+
+#define XBUFSIZE 8
+#define MAX_IVLEN 32
+
+static int testmgr_alloc_buf(char *buf[XBUFSIZE])
+{
+ int i;
+
+ for (i = 0; i < XBUFSIZE; i++) {
+ buf[i] = (void *)__get_free_page(GFP_KERNEL);
+ if (!buf[i])
+ goto err_free_buf;
+ }
+
+ return 0;
+
+err_free_buf:
+ while (i-- > 0)
+ free_page((unsigned long)buf[i]);
+
+ return -ENOMEM;
+}
+
+static void testmgr_free_buf(char *buf[XBUFSIZE])
+{
+ int i;
+
+ for (i = 0; i < XBUFSIZE; i++)
+ free_page((unsigned long)buf[i]);
+}
+
+static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE],
+ unsigned int buflen, const void *assoc,
+ unsigned int aad_size)
+{
+ int np = (buflen + PAGE_SIZE - 1)/PAGE_SIZE;
+ int k, rem;
+
+ if (np > XBUFSIZE) {
+ rem = PAGE_SIZE;
+ np = XBUFSIZE;
+ } else {
+ rem = buflen % PAGE_SIZE;
+ }
+
+ sg_init_table(sg, np + 1);
+
+ sg_set_buf(&sg[0], assoc, aad_size);
+
+ if (rem)
+ np--;
+ for (k = 0; k < np; k++)
+ sg_set_buf(&sg[k + 1], xbuf[k], PAGE_SIZE);
+
+ if (rem)
+ sg_set_buf(&sg[k + 1], xbuf[k], rem);
+}
+
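The reworked sg_init_aead() now reserves sg[0] for the associated data and lays the payload out page by page behind it. Two worked cases with 4 KiB pages: buflen = 8192 gives np = 2 and rem = 0, so the table holds sg[0] = assoc plus two full-page entries; buflen = 5000 gives np = 2 and rem = 904, so np is decremented and the table holds sg[0] = assoc, one full page, and a final 904-byte entry. The version being removed further down set sg[0] from the callers after the fact and mishandled the rem == 0 case by always emitting a trailing zero-length entry.
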
static inline int do_one_aead_op(struct aead_request *req, int ret)
{
struct crypto_wait *wait = req->base.data;
@@ -86,6 +147,298 @@ static inline int do_one_aead_op(struct aead_request *req, int ret)
return crypto_wait_req(ret, wait);
}
+struct test_mb_aead_data {
+ struct scatterlist sg[XBUFSIZE];
+ struct scatterlist sgout[XBUFSIZE];
+ struct aead_request *req;
+ struct crypto_wait wait;
+ char *xbuf[XBUFSIZE];
+ char *xoutbuf[XBUFSIZE];
+ char *axbuf[XBUFSIZE];
+};
+
+static int do_mult_aead_op(struct test_mb_aead_data *data, int enc,
+ u32 num_mb)
+{
+ int i, rc[num_mb], err = 0;
+
+ /* Fire up a bunch of concurrent requests */
+ for (i = 0; i < num_mb; i++) {
+ if (enc == ENCRYPT)
+ rc[i] = crypto_aead_encrypt(data[i].req);
+ else
+ rc[i] = crypto_aead_decrypt(data[i].req);
+ }
+
+ /* Wait for all requests to finish */
+ for (i = 0; i < num_mb; i++) {
+ rc[i] = crypto_wait_req(rc[i], &data[i].wait);
+
+ if (rc[i]) {
+ pr_info("concurrent request %d error %d\n", i, rc[i]);
+ err = rc[i];
+ }
+ }
+
+ return err;
+}
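
do_mult_aead_op() — and its ahash and skcipher twins below — issues all num_mb asynchronous requests back to back and only then waits on each crypto_wait, so hardware queues actually see concurrent work; every request is still reaped even after a failure, and the last nonzero status is what gets returned.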
+
+static int test_mb_aead_jiffies(struct test_mb_aead_data *data, int enc,
+ int blen, int secs, u32 num_mb)
+{
+ unsigned long start, end;
+ int bcount;
+ int ret;
+
+ for (start = jiffies, end = start + secs * HZ, bcount = 0;
+ time_before(jiffies, end); bcount++) {
+ ret = do_mult_aead_op(data, enc, num_mb);
+ if (ret)
+ return ret;
+ }
+
+ pr_cont("%d operations in %d seconds (%ld bytes)\n",
+ bcount * num_mb, secs, (long)bcount * blen * num_mb);
+ return 0;
+}
+
+static int test_mb_aead_cycles(struct test_mb_aead_data *data, int enc,
+ int blen, u32 num_mb)
+{
+ unsigned long cycles = 0;
+ int ret = 0;
+ int i;
+
+ /* Warm-up run. */
+ for (i = 0; i < 4; i++) {
+ ret = do_mult_aead_op(data, enc, num_mb);
+ if (ret)
+ goto out;
+ }
+
+ /* The real thing. */
+ for (i = 0; i < 8; i++) {
+ cycles_t start, end;
+
+ start = get_cycles();
+ ret = do_mult_aead_op(data, enc, num_mb);
+ end = get_cycles();
+
+ if (ret)
+ goto out;
+
+ cycles += end - start;
+ }
+
+out:
+ if (ret == 0)
+ pr_cont("1 operation in %lu cycles (%d bytes)\n",
+ (cycles + 4) / (8 * num_mb), blen);
+
+ return ret;
+}
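
The cycle-count variant discards four warm-up runs, sums the cycle counts of eight timed runs of num_mb requests each, and reports (cycles + 4) / (8 * num_mb) cycles per request, the +4 serving as a rounding bias for the divide-by-eight.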
+
+static void test_mb_aead_speed(const char *algo, int enc, int secs,
+ struct aead_speed_template *template,
+ unsigned int tcount, u8 authsize,
+ unsigned int aad_size, u8 *keysize, u32 num_mb)
+{
+ struct test_mb_aead_data *data;
+ struct crypto_aead *tfm;
+ unsigned int i, j, iv_len;
+ const char *key;
+ const char *e;
+ void *assoc;
+ u32 *b_size;
+ char *iv;
+ int ret;
+
+ if (aad_size >= PAGE_SIZE) {
+ pr_err("associate data length (%u) too big\n", aad_size);
+ return;
+ }
+
+ iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
+ if (!iv)
+ return;
+
+ if (enc == ENCRYPT)
+ e = "encryption";
+ else
+ e = "decryption";
+
+ data = kcalloc(num_mb, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ goto out_free_iv;
+
+ tfm = crypto_alloc_aead(algo, 0, 0);
+ if (IS_ERR(tfm)) {
+ pr_err("failed to load transform for %s: %ld\n",
+ algo, PTR_ERR(tfm));
+ goto out_free_data;
+ }
+
+ ret = crypto_aead_setauthsize(tfm, authsize);
+
+ for (i = 0; i < num_mb; ++i)
+ if (testmgr_alloc_buf(data[i].xbuf)) {
+ while (i--)
+ testmgr_free_buf(data[i].xbuf);
+ goto out_free_tfm;
+ }
+
+ for (i = 0; i < num_mb; ++i)
+ if (testmgr_alloc_buf(data[i].axbuf)) {
+ while (i--)
+ testmgr_free_buf(data[i].axbuf);
+ goto out_free_xbuf;
+ }
+
+ for (i = 0; i < num_mb; ++i)
+ if (testmgr_alloc_buf(data[i].xoutbuf)) {
+ while (i--)
+ testmgr_free_buf(data[i].xoutbuf);
+ goto out_free_axbuf;
+ }
+
+ for (i = 0; i < num_mb; ++i) {
+ data[i].req = aead_request_alloc(tfm, GFP_KERNEL);
+ if (!data[i].req) {
+ pr_err("alg: skcipher: Failed to allocate request for %s\n",
+ algo);
+ while (i--)
+ aead_request_free(data[i].req);
+ goto out_free_xoutbuf;
+ }
+ }
+
+ for (i = 0; i < num_mb; ++i) {
+ crypto_init_wait(&data[i].wait);
+ aead_request_set_callback(data[i].req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &data[i].wait);
+ }
+
+ pr_info("\ntesting speed of multibuffer %s (%s) %s\n", algo,
+ get_driver_name(crypto_aead, tfm), e);
+
+ i = 0;
+ do {
+ b_size = aead_sizes;
+ do {
+ if (*b_size + authsize > XBUFSIZE * PAGE_SIZE) {
+ pr_err("template (%u) too big for buffer (%lu)\n",
+ authsize + *b_size,
+ XBUFSIZE * PAGE_SIZE);
+ goto out;
+ }
+
+ pr_info("test %u (%d bit key, %d byte blocks): ", i,
+ *keysize * 8, *b_size);
+
+ /* Set up tfm global state, i.e. the key */
+
+ memset(tvmem[0], 0xff, PAGE_SIZE);
+ key = tvmem[0];
+ for (j = 0; j < tcount; j++) {
+ if (template[j].klen == *keysize) {
+ key = template[j].key;
+ break;
+ }
+ }
+
+ crypto_aead_clear_flags(tfm, ~0);
+
+ ret = crypto_aead_setkey(tfm, key, *keysize);
+ if (ret) {
+ pr_err("setkey() failed flags=%x\n",
+ crypto_aead_get_flags(tfm));
+ goto out;
+ }
+
+ iv_len = crypto_aead_ivsize(tfm);
+ if (iv_len)
+ memset(iv, 0xff, iv_len);
+
+ /* Now set up the per-request state, i.e. the buffers */
+
+ for (j = 0; j < num_mb; ++j) {
+ struct test_mb_aead_data *cur = &data[j];
+
+ assoc = cur->axbuf[0];
+ memset(assoc, 0xff, aad_size);
+
+ sg_init_aead(cur->sg, cur->xbuf,
+ *b_size + (enc ? 0 : authsize),
+ assoc, aad_size);
+
+ sg_init_aead(cur->sgout, cur->xoutbuf,
+ *b_size + (enc ? authsize : 0),
+ assoc, aad_size);
+
+ aead_request_set_ad(cur->req, aad_size);
+
+ if (!enc) {
+
+ aead_request_set_crypt(cur->req,
+ cur->sgout,
+ cur->sg,
+ *b_size, iv);
+ ret = crypto_aead_encrypt(cur->req);
+ ret = do_one_aead_op(cur->req, ret);
+
+ if (ret) {
+ pr_err("calculating auth failed failed (%d)\n",
+ ret);
+ break;
+ }
+ }
+
+ aead_request_set_crypt(cur->req, cur->sg,
+ cur->sgout, *b_size +
+ (enc ? 0 : authsize),
+ iv);
+
+ }
+
+ if (secs)
+ ret = test_mb_aead_jiffies(data, enc, *b_size,
+ secs, num_mb);
+ else
+ ret = test_mb_aead_cycles(data, enc, *b_size,
+ num_mb);
+
+ if (ret) {
+ pr_err("%s() failed return code=%d\n", e, ret);
+ break;
+ }
+ b_size++;
+ i++;
+ } while (*b_size);
+ keysize++;
+ } while (*keysize);
+
+out:
+ for (i = 0; i < num_mb; ++i)
+ aead_request_free(data[i].req);
+out_free_xoutbuf:
+ for (i = 0; i < num_mb; ++i)
+ testmgr_free_buf(data[i].xoutbuf);
+out_free_axbuf:
+ for (i = 0; i < num_mb; ++i)
+ testmgr_free_buf(data[i].axbuf);
+out_free_xbuf:
+ for (i = 0; i < num_mb; ++i)
+ testmgr_free_buf(data[i].xbuf);
+out_free_tfm:
+ crypto_free_aead(tfm);
+out_free_data:
+ kfree(data);
+out_free_iv:
+ kfree(iv);
+}
+
static int test_aead_jiffies(struct aead_request *req, int enc,
int blen, int secs)
{
@@ -151,60 +504,6 @@ out:
return ret;
}
-static u32 block_sizes[] = { 16, 64, 256, 1024, 8192, 0 };
-static u32 aead_sizes[] = { 16, 64, 256, 512, 1024, 2048, 4096, 8192, 0 };
-
-#define XBUFSIZE 8
-#define MAX_IVLEN 32
-
-static int testmgr_alloc_buf(char *buf[XBUFSIZE])
-{
- int i;
-
- for (i = 0; i < XBUFSIZE; i++) {
- buf[i] = (void *)__get_free_page(GFP_KERNEL);
- if (!buf[i])
- goto err_free_buf;
- }
-
- return 0;
-
-err_free_buf:
- while (i-- > 0)
- free_page((unsigned long)buf[i]);
-
- return -ENOMEM;
-}
-
-static void testmgr_free_buf(char *buf[XBUFSIZE])
-{
- int i;
-
- for (i = 0; i < XBUFSIZE; i++)
- free_page((unsigned long)buf[i]);
-}
-
-static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE],
- unsigned int buflen)
-{
- int np = (buflen + PAGE_SIZE - 1)/PAGE_SIZE;
- int k, rem;
-
- if (np > XBUFSIZE) {
- rem = PAGE_SIZE;
- np = XBUFSIZE;
- } else {
- rem = buflen % PAGE_SIZE;
- }
-
- sg_init_table(sg, np + 1);
- np--;
- for (k = 0; k < np; k++)
- sg_set_buf(&sg[k + 1], xbuf[k], PAGE_SIZE);
-
- sg_set_buf(&sg[k + 1], xbuf[k], rem);
-}
-
static void test_aead_speed(const char *algo, int enc, unsigned int secs,
struct aead_speed_template *template,
unsigned int tcount, u8 authsize,
@@ -316,19 +615,37 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
goto out;
}
- sg_init_aead(sg, xbuf,
- *b_size + (enc ? 0 : authsize));
+ sg_init_aead(sg, xbuf, *b_size + (enc ? 0 : authsize),
+ assoc, aad_size);
sg_init_aead(sgout, xoutbuf,
- *b_size + (enc ? authsize : 0));
+ *b_size + (enc ? authsize : 0), assoc,
+ aad_size);
- sg_set_buf(&sg[0], assoc, aad_size);
- sg_set_buf(&sgout[0], assoc, aad_size);
+ aead_request_set_ad(req, aad_size);
+
+ if (!enc) {
+
+ /*
+ * For decryption we need a valid auth tag, so
+ * we run the encryption path once with the
+ * buffers reversed (input <-> output) to
+ * compute it.
+ */
+ aead_request_set_crypt(req, sgout, sg,
+ *b_size, iv);
+ ret = do_one_aead_op(req,
+ crypto_aead_encrypt(req));
+
+ if (ret) {
+ pr_err("calculating auth failed failed (%d)\n",
+ ret);
+ break;
+ }
+ }
aead_request_set_crypt(req, sg, sgout,
*b_size + (enc ? 0 : authsize),
iv);
- aead_request_set_ad(req, aad_size);
if (secs)
ret = test_aead_jiffies(req, enc, *b_size,
@@ -381,24 +698,98 @@ static inline int do_one_ahash_op(struct ahash_request *req, int ret)
}
struct test_mb_ahash_data {
- struct scatterlist sg[TVMEMSIZE];
+ struct scatterlist sg[XBUFSIZE];
char result[64];
struct ahash_request *req;
struct crypto_wait wait;
char *xbuf[XBUFSIZE];
};
-static void test_mb_ahash_speed(const char *algo, unsigned int sec,
- struct hash_speed *speed)
+static inline int do_mult_ahash_op(struct test_mb_ahash_data *data, u32 num_mb)
+{
+ int i, rc[num_mb], err = 0;
+
+ /* Fire up a bunch of concurrent requests */
+ for (i = 0; i < num_mb; i++)
+ rc[i] = crypto_ahash_digest(data[i].req);
+
+ /* Wait for all requests to finish */
+ for (i = 0; i < num_mb; i++) {
+ rc[i] = crypto_wait_req(rc[i], &data[i].wait);
+
+ if (rc[i]) {
+ pr_info("concurrent request %d error %d\n", i, rc[i]);
+ err = rc[i];
+ }
+ }
+
+ return err;
+}
+
+static int test_mb_ahash_jiffies(struct test_mb_ahash_data *data, int blen,
+ int secs, u32 num_mb)
+{
+ unsigned long start, end;
+ int bcount;
+ int ret;
+
+ for (start = jiffies, end = start + secs * HZ, bcount = 0;
+ time_before(jiffies, end); bcount++) {
+ ret = do_mult_ahash_op(data, num_mb);
+ if (ret)
+ return ret;
+ }
+
+ pr_cont("%d operations in %d seconds (%ld bytes)\n",
+ bcount * num_mb, secs, (long)bcount * blen * num_mb);
+ return 0;
+}
+
+static int test_mb_ahash_cycles(struct test_mb_ahash_data *data, int blen,
+ u32 num_mb)
+{
+ unsigned long cycles = 0;
+ int ret = 0;
+ int i;
+
+ /* Warm-up run. */
+ for (i = 0; i < 4; i++) {
+ ret = do_mult_ahash_op(data, num_mb);
+ if (ret)
+ goto out;
+ }
+
+ /* The real thing. */
+ for (i = 0; i < 8; i++) {
+ cycles_t start, end;
+
+ start = get_cycles();
+ ret = do_mult_ahash_op(data, num_mb);
+ end = get_cycles();
+
+ if (ret)
+ goto out;
+
+ cycles += end - start;
+ }
+
+out:
+ if (ret == 0)
+ pr_cont("1 operation in %lu cycles (%d bytes)\n",
+ (cycles + 4) / (8 * num_mb), blen);
+
+ return ret;
+}
+
+static void test_mb_ahash_speed(const char *algo, unsigned int secs,
+ struct hash_speed *speed, u32 num_mb)
{
struct test_mb_ahash_data *data;
struct crypto_ahash *tfm;
- unsigned long start, end;
- unsigned long cycles;
unsigned int i, j, k;
int ret;
- data = kzalloc(sizeof(*data) * 8, GFP_KERNEL);
+ data = kcalloc(num_mb, sizeof(*data), GFP_KERNEL);
if (!data)
return;
@@ -409,7 +800,7 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec,
goto free_data;
}
- for (i = 0; i < 8; ++i) {
+ for (i = 0; i < num_mb; ++i) {
if (testmgr_alloc_buf(data[i].xbuf))
goto out;
@@ -424,7 +815,12 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec,
ahash_request_set_callback(data[i].req, 0, crypto_req_done,
&data[i].wait);
- test_hash_sg_init(data[i].sg);
+
+ sg_init_table(data[i].sg, XBUFSIZE);
+ for (j = 0; j < XBUFSIZE; j++) {
+ sg_set_buf(data[i].sg + j, data[i].xbuf[j], PAGE_SIZE);
+ memset(data[i].xbuf[j], 0xff, PAGE_SIZE);
+ }
}
pr_info("\ntesting speed of multibuffer %s (%s)\n", algo,
@@ -435,16 +831,16 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec,
if (speed[i].blen != speed[i].plen)
continue;
- if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) {
+ if (speed[i].blen > XBUFSIZE * PAGE_SIZE) {
pr_err("template (%u) too big for tvmem (%lu)\n",
- speed[i].blen, TVMEMSIZE * PAGE_SIZE);
+ speed[i].blen, XBUFSIZE * PAGE_SIZE);
goto out;
}
if (speed[i].klen)
crypto_ahash_setkey(tfm, tvmem[0], speed[i].klen);
- for (k = 0; k < 8; k++)
+ for (k = 0; k < num_mb; k++)
ahash_request_set_crypt(data[k].req, data[k].sg,
data[k].result, speed[i].blen);
@@ -453,34 +849,12 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec,
i, speed[i].blen, speed[i].plen,
speed[i].blen / speed[i].plen);
- start = get_cycles();
-
- for (k = 0; k < 8; k++) {
- ret = crypto_ahash_digest(data[k].req);
- if (ret == -EINPROGRESS) {
- ret = 0;
- continue;
- }
-
- if (ret)
- break;
-
- crypto_req_done(&data[k].req->base, 0);
- }
-
- for (j = 0; j < k; j++) {
- struct crypto_wait *wait = &data[j].wait;
- int wait_ret;
-
- wait_ret = crypto_wait_req(-EINPROGRESS, wait);
- if (wait_ret)
- ret = wait_ret;
- }
+ if (secs)
+ ret = test_mb_ahash_jiffies(data, speed[i].blen, secs,
+ num_mb);
+ else
+ ret = test_mb_ahash_cycles(data, speed[i].blen, num_mb);
- end = get_cycles();
- cycles = end - start;
- pr_cont("%6lu cycles/operation, %4lu cycles/byte\n",
- cycles, cycles / (8 * speed[i].blen));
if (ret) {
pr_err("At least one hashing failed ret=%d\n", ret);
@@ -489,10 +863,10 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec,
}
out:
- for (k = 0; k < 8; ++k)
+ for (k = 0; k < num_mb; ++k)
ahash_request_free(data[k].req);
- for (k = 0; k < 8; ++k)
+ for (k = 0; k < num_mb; ++k)
testmgr_free_buf(data[k].xbuf);
crypto_free_ahash(tfm);
@@ -736,6 +1110,254 @@ static void test_hash_speed(const char *algo, unsigned int secs,
return test_ahash_speed_common(algo, secs, speed, CRYPTO_ALG_ASYNC);
}
+struct test_mb_skcipher_data {
+ struct scatterlist sg[XBUFSIZE];
+ struct skcipher_request *req;
+ struct crypto_wait wait;
+ char *xbuf[XBUFSIZE];
+};
+
+static int do_mult_acipher_op(struct test_mb_skcipher_data *data, int enc,
+ u32 num_mb)
+{
+ int i, rc[num_mb], err = 0;
+
+ /* Fire up a bunch of concurrent requests */
+ for (i = 0; i < num_mb; i++) {
+ if (enc == ENCRYPT)
+ rc[i] = crypto_skcipher_encrypt(data[i].req);
+ else
+ rc[i] = crypto_skcipher_decrypt(data[i].req);
+ }
+
+ /* Wait for all requests to finish */
+ for (i = 0; i < num_mb; i++) {
+ rc[i] = crypto_wait_req(rc[i], &data[i].wait);
+
+ if (rc[i]) {
+ pr_info("concurrent request %d error %d\n", i, rc[i]);
+ err = rc[i];
+ }
+ }
+
+ return err;
+}
+
+static int test_mb_acipher_jiffies(struct test_mb_skcipher_data *data, int enc,
+ int blen, int secs, u32 num_mb)
+{
+ unsigned long start, end;
+ int bcount;
+ int ret;
+
+ for (start = jiffies, end = start + secs * HZ, bcount = 0;
+ time_before(jiffies, end); bcount++) {
+ ret = do_mult_acipher_op(data, enc, num_mb);
+ if (ret)
+ return ret;
+ }
+
+ pr_cont("%d operations in %d seconds (%ld bytes)\n",
+ bcount * num_mb, secs, (long)bcount * blen * num_mb);
+ return 0;
+}
+
+static int test_mb_acipher_cycles(struct test_mb_skcipher_data *data, int enc,
+ int blen, u32 num_mb)
+{
+ unsigned long cycles = 0;
+ int ret = 0;
+ int i;
+
+ /* Warm-up run. */
+ for (i = 0; i < 4; i++) {
+ ret = do_mult_acipher_op(data, enc, num_mb);
+ if (ret)
+ goto out;
+ }
+
+ /* The real thing. */
+ for (i = 0; i < 8; i++) {
+ cycles_t start, end;
+
+ start = get_cycles();
+ ret = do_mult_acipher_op(data, enc, num_mb);
+ end = get_cycles();
+
+ if (ret)
+ goto out;
+
+ cycles += end - start;
+ }
+
+out:
+ if (ret == 0)
+ pr_cont("1 operation in %lu cycles (%d bytes)\n",
+ (cycles + 4) / (8 * num_mb), blen);
+
+ return ret;
+}
+
+static void test_mb_skcipher_speed(const char *algo, int enc, int secs,
+ struct cipher_speed_template *template,
+ unsigned int tcount, u8 *keysize, u32 num_mb)
+{
+ struct test_mb_skcipher_data *data;
+ struct crypto_skcipher *tfm;
+ unsigned int i, j, iv_len;
+ const char *key;
+ const char *e;
+ u32 *b_size;
+ char iv[128];
+ int ret;
+
+ if (enc == ENCRYPT)
+ e = "encryption";
+ else
+ e = "decryption";
+
+ data = kcalloc(num_mb, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return;
+
+ tfm = crypto_alloc_skcipher(algo, 0, 0);
+ if (IS_ERR(tfm)) {
+ pr_err("failed to load transform for %s: %ld\n",
+ algo, PTR_ERR(tfm));
+ goto out_free_data;
+ }
+
+ for (i = 0; i < num_mb; ++i)
+ if (testmgr_alloc_buf(data[i].xbuf)) {
+ while (i--)
+ testmgr_free_buf(data[i].xbuf);
+ goto out_free_tfm;
+ }
+
+ for (i = 0; i < num_mb; ++i) {
+ data[i].req = skcipher_request_alloc(tfm, GFP_KERNEL);
+ if (!data[i].req) {
+ pr_err("alg: skcipher: Failed to allocate request for %s\n",
+ algo);
+ while (i--)
+ skcipher_request_free(data[i].req);
+ goto out_free_xbuf;
+ }
+ }
+
+ for (i = 0; i < num_mb; ++i) {
+ skcipher_request_set_callback(data[i].req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &data[i].wait);
+ crypto_init_wait(&data[i].wait);
+ }
+
+ pr_info("\ntesting speed of multibuffer %s (%s) %s\n", algo,
+ get_driver_name(crypto_skcipher, tfm), e);
+
+ i = 0;
+ do {
+ b_size = block_sizes;
+ do {
+ if (*b_size > XBUFSIZE * PAGE_SIZE) {
+ pr_err("template (%u) too big for buffer (%lu)\n",
+ *b_size, XBUFSIZE * PAGE_SIZE);
+ goto out;
+ }
+
+ pr_info("test %u (%d bit key, %d byte blocks): ", i,
+ *keysize * 8, *b_size);
+
+ /* Set up tfm global state, i.e. the key */
+
+ memset(tvmem[0], 0xff, PAGE_SIZE);
+ key = tvmem[0];
+ for (j = 0; j < tcount; j++) {
+ if (template[j].klen == *keysize) {
+ key = template[j].key;
+ break;
+ }
+ }
+
+ crypto_skcipher_clear_flags(tfm, ~0);
+
+ ret = crypto_skcipher_setkey(tfm, key, *keysize);
+ if (ret) {
+ pr_err("setkey() failed flags=%x\n",
+ crypto_skcipher_get_flags(tfm));
+ goto out;
+ }
+
+ iv_len = crypto_skcipher_ivsize(tfm);
+ if (iv_len)
+ memset(&iv, 0xff, iv_len);
+
+ /* Now set up the per-request state, i.e. the buffers */
+
+ for (j = 0; j < num_mb; ++j) {
+ struct test_mb_skcipher_data *cur = &data[j];
+ unsigned int k = *b_size;
+ unsigned int pages = DIV_ROUND_UP(k, PAGE_SIZE);
+ unsigned int p = 0;
+
+ sg_init_table(cur->sg, pages);
+
+ while (k > PAGE_SIZE) {
+ sg_set_buf(cur->sg + p, cur->xbuf[p],
+ PAGE_SIZE);
+ memset(cur->xbuf[p], 0xff, PAGE_SIZE);
+ p++;
+ k -= PAGE_SIZE;
+ }
+
+ sg_set_buf(cur->sg + p, cur->xbuf[p], k);
+ memset(cur->xbuf[p], 0xff, k);
+
+ skcipher_request_set_crypt(cur->req, cur->sg,
+ cur->sg, *b_size,
+ iv);
+ }
+
+ if (secs)
+ ret = test_mb_acipher_jiffies(data, enc,
+ *b_size, secs,
+ num_mb);
+ else
+ ret = test_mb_acipher_cycles(data, enc,
+ *b_size, num_mb);
+
+ if (ret) {
+ pr_err("%s() failed flags=%x\n", e,
+ crypto_skcipher_get_flags(tfm));
+ break;
+ }
+ b_size++;
+ i++;
+ } while (*b_size);
+ keysize++;
+ } while (*keysize);
+
+out:
+ for (i = 0; i < num_mb; ++i)
+ skcipher_request_free(data[i].req);
+out_free_xbuf:
+ for (i = 0; i < num_mb; ++i)
+ testmgr_free_buf(data[i].xbuf);
+out_free_tfm:
+ crypto_free_skcipher(tfm);
+out_free_data:
+ kfree(data);
+}
+
static inline int do_one_acipher_op(struct skcipher_request *req, int ret)
{
struct crypto_wait *wait = req->base.data;
@@ -1557,16 +2179,24 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
NULL, 0, 16, 16, aead_speed_template_20);
test_aead_speed("gcm(aes)", ENCRYPT, sec,
NULL, 0, 16, 8, speed_template_16_24_32);
+ test_aead_speed("rfc4106(gcm(aes))", DECRYPT, sec,
+ NULL, 0, 16, 16, aead_speed_template_20);
+ test_aead_speed("gcm(aes)", DECRYPT, sec,
+ NULL, 0, 16, 8, speed_template_16_24_32);
break;
case 212:
test_aead_speed("rfc4309(ccm(aes))", ENCRYPT, sec,
NULL, 0, 16, 16, aead_speed_template_19);
+ test_aead_speed("rfc4309(ccm(aes))", DECRYPT, sec,
+ NULL, 0, 16, 16, aead_speed_template_19);
break;
case 213:
test_aead_speed("rfc7539esp(chacha20,poly1305)", ENCRYPT, sec,
NULL, 0, 16, 8, aead_speed_template_36);
+ test_aead_speed("rfc7539esp(chacha20,poly1305)", DECRYPT, sec,
+ NULL, 0, 16, 8, aead_speed_template_36);
break;
case 214:
@@ -1574,6 +2204,33 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
speed_template_32);
break;
+ case 215:
+ test_mb_aead_speed("rfc4106(gcm(aes))", ENCRYPT, sec, NULL,
+ 0, 16, 16, aead_speed_template_20, num_mb);
+ test_mb_aead_speed("gcm(aes)", ENCRYPT, sec, NULL, 0, 16, 8,
+ speed_template_16_24_32, num_mb);
+ test_mb_aead_speed("rfc4106(gcm(aes))", DECRYPT, sec, NULL,
+ 0, 16, 16, aead_speed_template_20, num_mb);
+ test_mb_aead_speed("gcm(aes)", DECRYPT, sec, NULL, 0, 16, 8,
+ speed_template_16_24_32, num_mb);
+ break;
+
+ case 216:
+ test_mb_aead_speed("rfc4309(ccm(aes))", ENCRYPT, sec, NULL, 0,
+ 16, 16, aead_speed_template_19, num_mb);
+ test_mb_aead_speed("rfc4309(ccm(aes))", DECRYPT, sec, NULL, 0,
+ 16, 16, aead_speed_template_19, num_mb);
+ break;
+
+ case 217:
+ test_mb_aead_speed("rfc7539esp(chacha20,poly1305)", ENCRYPT,
+ sec, NULL, 0, 16, 8, aead_speed_template_36,
+ num_mb);
+ test_mb_aead_speed("rfc7539esp(chacha20,poly1305)", DECRYPT,
+ sec, NULL, 0, 16, 8, aead_speed_template_36,
+ num_mb);
+ break;
+
case 300:
if (alg) {
test_hash_speed(alg, sec, generic_hash_speed_template);
@@ -1778,19 +2435,23 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
if (mode > 400 && mode < 500) break;
/* fall through */
case 422:
- test_mb_ahash_speed("sha1", sec, generic_hash_speed_template);
+ test_mb_ahash_speed("sha1", sec, generic_hash_speed_template,
+ num_mb);
if (mode > 400 && mode < 500) break;
/* fall through */
case 423:
- test_mb_ahash_speed("sha256", sec, generic_hash_speed_template);
+ test_mb_ahash_speed("sha256", sec, generic_hash_speed_template,
+ num_mb);
if (mode > 400 && mode < 500) break;
/* fall through */
case 424:
- test_mb_ahash_speed("sha512", sec, generic_hash_speed_template);
+ test_mb_ahash_speed("sha512", sec, generic_hash_speed_template,
+ num_mb);
if (mode > 400 && mode < 500) break;
/* fall through */
case 425:
- test_mb_ahash_speed("sm3", sec, generic_hash_speed_template);
+ test_mb_ahash_speed("sm3", sec, generic_hash_speed_template,
+ num_mb);
if (mode > 400 && mode < 500) break;
/* fall through */
case 499:
@@ -2008,6 +2669,218 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
speed_template_8_32);
break;
+ case 600:
+ test_mb_skcipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("ecb(aes)", DECRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("cbc(aes)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("cbc(aes)", DECRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("lrw(aes)", ENCRYPT, sec, NULL, 0,
+ speed_template_32_40_48, num_mb);
+ test_mb_skcipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
+ speed_template_32_40_48, num_mb);
+ test_mb_skcipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0,
+ speed_template_32_64, num_mb);
+ test_mb_skcipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
+ speed_template_32_64, num_mb);
+ test_mb_skcipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("ctr(aes)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("cfb(aes)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("cfb(aes)", DECRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("ofb(aes)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("ofb(aes)", DECRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("rfc3686(ctr(aes))", ENCRYPT, sec, NULL,
+ 0, speed_template_20_28_36, num_mb);
+ test_mb_skcipher_speed("rfc3686(ctr(aes))", DECRYPT, sec, NULL,
+ 0, speed_template_20_28_36, num_mb);
+ break;
+
+ case 601:
+ test_mb_skcipher_speed("ecb(des3_ede)", ENCRYPT, sec,
+ des3_speed_template, DES3_SPEED_VECTORS,
+ speed_template_24, num_mb);
+ test_mb_skcipher_speed("ecb(des3_ede)", DECRYPT, sec,
+ des3_speed_template, DES3_SPEED_VECTORS,
+ speed_template_24, num_mb);
+ test_mb_skcipher_speed("cbc(des3_ede)", ENCRYPT, sec,
+ des3_speed_template, DES3_SPEED_VECTORS,
+ speed_template_24, num_mb);
+ test_mb_skcipher_speed("cbc(des3_ede)", DECRYPT, sec,
+ des3_speed_template, DES3_SPEED_VECTORS,
+ speed_template_24, num_mb);
+ test_mb_skcipher_speed("cfb(des3_ede)", ENCRYPT, sec,
+ des3_speed_template, DES3_SPEED_VECTORS,
+ speed_template_24, num_mb);
+ test_mb_skcipher_speed("cfb(des3_ede)", DECRYPT, sec,
+ des3_speed_template, DES3_SPEED_VECTORS,
+ speed_template_24, num_mb);
+ test_mb_skcipher_speed("ofb(des3_ede)", ENCRYPT, sec,
+ des3_speed_template, DES3_SPEED_VECTORS,
+ speed_template_24, num_mb);
+ test_mb_skcipher_speed("ofb(des3_ede)", DECRYPT, sec,
+ des3_speed_template, DES3_SPEED_VECTORS,
+ speed_template_24, num_mb);
+ break;
+
+ case 602:
+ test_mb_skcipher_speed("ecb(des)", ENCRYPT, sec, NULL, 0,
+ speed_template_8, num_mb);
+ test_mb_skcipher_speed("ecb(des)", DECRYPT, sec, NULL, 0,
+ speed_template_8, num_mb);
+ test_mb_skcipher_speed("cbc(des)", ENCRYPT, sec, NULL, 0,
+ speed_template_8, num_mb);
+ test_mb_skcipher_speed("cbc(des)", DECRYPT, sec, NULL, 0,
+ speed_template_8, num_mb);
+ test_mb_skcipher_speed("cfb(des)", ENCRYPT, sec, NULL, 0,
+ speed_template_8, num_mb);
+ test_mb_skcipher_speed("cfb(des)", DECRYPT, sec, NULL, 0,
+ speed_template_8, num_mb);
+ test_mb_skcipher_speed("ofb(des)", ENCRYPT, sec, NULL, 0,
+ speed_template_8, num_mb);
+ test_mb_skcipher_speed("ofb(des)", DECRYPT, sec, NULL, 0,
+ speed_template_8, num_mb);
+ break;
+
+ case 603:
+ test_mb_skcipher_speed("ecb(serpent)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("ecb(serpent)", DECRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("cbc(serpent)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("cbc(serpent)", DECRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("ctr(serpent)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("ctr(serpent)", DECRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("lrw(serpent)", ENCRYPT, sec, NULL, 0,
+ speed_template_32_48, num_mb);
+ test_mb_skcipher_speed("lrw(serpent)", DECRYPT, sec, NULL, 0,
+ speed_template_32_48, num_mb);
+ test_mb_skcipher_speed("xts(serpent)", ENCRYPT, sec, NULL, 0,
+ speed_template_32_64, num_mb);
+ test_mb_skcipher_speed("xts(serpent)", DECRYPT, sec, NULL, 0,
+ speed_template_32_64, num_mb);
+ break;
+
+ case 604:
+ test_mb_skcipher_speed("ecb(twofish)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("ecb(twofish)", DECRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("cbc(twofish)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("cbc(twofish)", DECRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("ctr(twofish)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("ctr(twofish)", DECRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("lrw(twofish)", ENCRYPT, sec, NULL, 0,
+ speed_template_32_40_48, num_mb);
+ test_mb_skcipher_speed("lrw(twofish)", DECRYPT, sec, NULL, 0,
+ speed_template_32_40_48, num_mb);
+ test_mb_skcipher_speed("xts(twofish)", ENCRYPT, sec, NULL, 0,
+ speed_template_32_48_64, num_mb);
+ test_mb_skcipher_speed("xts(twofish)", DECRYPT, sec, NULL, 0,
+ speed_template_32_48_64, num_mb);
+ break;
+
+ case 605:
+ test_mb_skcipher_speed("ecb(arc4)", ENCRYPT, sec, NULL, 0,
+ speed_template_8, num_mb);
+ break;
+
+ case 606:
+ test_mb_skcipher_speed("ecb(cast5)", ENCRYPT, sec, NULL, 0,
+ speed_template_8_16, num_mb);
+ test_mb_skcipher_speed("ecb(cast5)", DECRYPT, sec, NULL, 0,
+ speed_template_8_16, num_mb);
+ test_mb_skcipher_speed("cbc(cast5)", ENCRYPT, sec, NULL, 0,
+ speed_template_8_16, num_mb);
+ test_mb_skcipher_speed("cbc(cast5)", DECRYPT, sec, NULL, 0,
+ speed_template_8_16, num_mb);
+ test_mb_skcipher_speed("ctr(cast5)", ENCRYPT, sec, NULL, 0,
+ speed_template_8_16, num_mb);
+ test_mb_skcipher_speed("ctr(cast5)", DECRYPT, sec, NULL, 0,
+ speed_template_8_16, num_mb);
+ break;
+
+ case 607:
+ test_mb_skcipher_speed("ecb(cast6)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("ecb(cast6)", DECRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("cbc(cast6)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("cbc(cast6)", DECRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("ctr(cast6)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("ctr(cast6)", DECRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("lrw(cast6)", ENCRYPT, sec, NULL, 0,
+ speed_template_32_48, num_mb);
+ test_mb_skcipher_speed("lrw(cast6)", DECRYPT, sec, NULL, 0,
+ speed_template_32_48, num_mb);
+ test_mb_skcipher_speed("xts(cast6)", ENCRYPT, sec, NULL, 0,
+ speed_template_32_64, num_mb);
+ test_mb_skcipher_speed("xts(cast6)", DECRYPT, sec, NULL, 0,
+ speed_template_32_64, num_mb);
+ break;
+
+ case 608:
+ test_mb_skcipher_speed("ecb(camellia)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("ecb(camellia)", DECRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("cbc(camellia)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("cbc(camellia)", DECRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("ctr(camellia)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("ctr(camellia)", DECRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("lrw(camellia)", ENCRYPT, sec, NULL, 0,
+ speed_template_32_48, num_mb);
+ test_mb_skcipher_speed("lrw(camellia)", DECRYPT, sec, NULL, 0,
+ speed_template_32_48, num_mb);
+ test_mb_skcipher_speed("xts(camellia)", ENCRYPT, sec, NULL, 0,
+ speed_template_32_64, num_mb);
+ test_mb_skcipher_speed("xts(camellia)", DECRYPT, sec, NULL, 0,
+ speed_template_32_64, num_mb);
+ break;
+
+ case 609:
+ test_mb_skcipher_speed("ecb(blowfish)", ENCRYPT, sec, NULL, 0,
+ speed_template_8_32, num_mb);
+ test_mb_skcipher_speed("ecb(blowfish)", DECRYPT, sec, NULL, 0,
+ speed_template_8_32, num_mb);
+ test_mb_skcipher_speed("cbc(blowfish)", ENCRYPT, sec, NULL, 0,
+ speed_template_8_32, num_mb);
+ test_mb_skcipher_speed("cbc(blowfish)", DECRYPT, sec, NULL, 0,
+ speed_template_8_32, num_mb);
+ test_mb_skcipher_speed("ctr(blowfish)", ENCRYPT, sec, NULL, 0,
+ speed_template_8_32, num_mb);
+ test_mb_skcipher_speed("ctr(blowfish)", DECRYPT, sec, NULL, 0,
+ speed_template_8_32, num_mb);
+ break;
+
case 1000:
test_available();
break;
@@ -2069,6 +2942,8 @@ module_param(mode, int, 0);
module_param(sec, uint, 0);
MODULE_PARM_DESC(sec, "Length in seconds of speed tests "
"(defaults to zero which uses CPU cycles instead)");
+module_param(num_mb, uint, 0000);
+MODULE_PARM_DESC(num_mb, "Number of concurrent requests to be used in mb speed tests (defaults to 8)");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Quick & dirty crypto testing module");
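
Usage follows the familiar tcrypt pattern; for example, modprobe tcrypt mode=215 sec=1 num_mb=16 runs the multibuffer AEAD cases above with 16 concurrent requests for one second per test, while leaving sec at its zero default measures CPU cycles instead (mode numbers per the switch statements above).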
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 29d7020b8826..d5e23a142a04 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -177,6 +177,18 @@ static void testmgr_free_buf(char *buf[XBUFSIZE])
free_page((unsigned long)buf[i]);
}
+static int ahash_guard_result(char *result, char c, int size)
+{
+ int i;
+
+ for (i = 0; i < size; i++) {
+ if (result[i] != c)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
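ahash_guard_result() implements a simple poison-and-check: the caller fills req->result with a known byte (0x01) before init/update/export/import and verifies afterwards that every byte is untouched, which catches implementations that write the digest into req->result before the final step.
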
static int ahash_partial_update(struct ahash_request **preq,
struct crypto_ahash *tfm, const struct hash_testvec *template,
void *hash_buff, int k, int temp, struct scatterlist *sg,
@@ -185,7 +197,8 @@ static int ahash_partial_update(struct ahash_request **preq,
char *state;
struct ahash_request *req;
int statesize, ret = -EINVAL;
- const char guard[] = { 0x00, 0xba, 0xad, 0x00 };
+ static const unsigned char guard[] = { 0x00, 0xba, 0xad, 0x00 };
+ int digestsize = crypto_ahash_digestsize(tfm);
req = *preq;
statesize = crypto_ahash_statesize(
@@ -196,12 +209,19 @@ static int ahash_partial_update(struct ahash_request **preq,
goto out_nostate;
}
memcpy(state + statesize, guard, sizeof(guard));
+ memset(result, 1, digestsize);
ret = crypto_ahash_export(req, state);
WARN_ON(memcmp(state + statesize, guard, sizeof(guard)));
if (ret) {
pr_err("alg: hash: Failed to export() for %s\n", algo);
goto out;
}
+ ret = ahash_guard_result(result, 1, digestsize);
+ if (ret) {
+ pr_err("alg: hash: Failed, export used req->result for %s\n",
+ algo);
+ goto out;
+ }
ahash_request_free(req);
req = ahash_request_alloc(tfm, GFP_KERNEL);
if (!req) {
@@ -221,6 +241,12 @@ static int ahash_partial_update(struct ahash_request **preq,
pr_err("alg: hash: Failed to import() for %s\n", algo);
goto out;
}
+ ret = ahash_guard_result(result, 1, digestsize);
+ if (ret) {
+ pr_err("alg: hash: Failed, import used req->result for %s\n",
+ algo);
+ goto out;
+ }
ret = crypto_wait_req(crypto_ahash_update(req), wait);
if (ret)
goto out;
@@ -316,18 +342,31 @@ static int __test_hash(struct crypto_ahash *tfm,
goto out;
}
} else {
+ memset(result, 1, digest_size);
ret = crypto_wait_req(crypto_ahash_init(req), &wait);
if (ret) {
pr_err("alg: hash: init failed on test %d "
"for %s: ret=%d\n", j, algo, -ret);
goto out;
}
+ ret = ahash_guard_result(result, 1, digest_size);
+ if (ret) {
+ pr_err("alg: hash: init failed on test %d "
+ "for %s: used req->result\n", j, algo);
+ goto out;
+ }
ret = crypto_wait_req(crypto_ahash_update(req), &wait);
if (ret) {
pr_err("alg: hash: update failed on test %d "
"for %s: ret=%d\n", j, algo, -ret);
goto out;
}
+ ret = ahash_guard_result(result, 1, digest_size);
+ if (ret) {
+ pr_err("alg: hash: update failed on test %d "
+ "for %s: used req->result\n", j, algo);
+ goto out;
+ }
ret = crypto_wait_req(crypto_ahash_final(req), &wait);
if (ret) {
pr_err("alg: hash: final failed on test %d "
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index a714b6293959..6044f6906bd6 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -1052,6 +1052,142 @@ static const struct hash_testvec sha3_224_tv_template[] = {
"\xc9\xfd\x55\x74\x49\x44\x79\xba"
"\x5c\x7e\x7a\xb7\x6e\xf2\x64\xea"
"\xd0\xfc\xce\x33",
+ .np = 2,
+ .tap = { 28, 28 },
+ }, {
+ .plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3"
+ "\x7a\x11\x85\x1c\xb3\x27\xbe\x55"
+ "\xec\x60\xf7\x8e\x02\x99\x30\xc7"
+ "\x3b\xd2\x69\x00\x74\x0b\xa2\x16"
+ "\xad\x44\xdb\x4f\xe6\x7d\x14\x88"
+ "\x1f\xb6\x2a\xc1\x58\xef\x63\xfa"
+ "\x91\x05\x9c\x33\xca\x3e\xd5\x6c"
+ "\x03\x77\x0e\xa5\x19\xb0\x47\xde"
+ "\x52\xe9\x80\x17\x8b\x22\xb9\x2d"
+ "\xc4\x5b\xf2\x66\xfd\x94\x08\x9f"
+ "\x36\xcd\x41\xd8\x6f\x06\x7a\x11"
+ "\xa8\x1c\xb3\x4a\xe1\x55\xec\x83"
+ "\x1a\x8e\x25\xbc\x30\xc7\x5e\xf5"
+ "\x69\x00\x97\x0b\xa2\x39\xd0\x44"
+ "\xdb\x72\x09\x7d\x14\xab\x1f\xb6"
+ "\x4d\xe4\x58\xef\x86\x1d\x91\x28"
+ "\xbf\x33\xca\x61\xf8\x6c\x03\x9a"
+ "\x0e\xa5\x3c\xd3\x47\xde\x75\x0c"
+ "\x80\x17\xae\x22\xb9\x50\xe7\x5b"
+ "\xf2\x89\x20\x94\x2b\xc2\x36\xcd"
+ "\x64\xfb\x6f\x06\x9d\x11\xa8\x3f"
+ "\xd6\x4a\xe1\x78\x0f\x83\x1a\xb1"
+ "\x25\xbc\x53\xea\x5e\xf5\x8c\x00"
+ "\x97\x2e\xc5\x39\xd0\x67\xfe\x72"
+ "\x09\xa0\x14\xab\x42\xd9\x4d\xe4"
+ "\x7b\x12\x86\x1d\xb4\x28\xbf\x56"
+ "\xed\x61\xf8\x8f\x03\x9a\x31\xc8"
+ "\x3c\xd3\x6a\x01\x75\x0c\xa3\x17"
+ "\xae\x45\xdc\x50\xe7\x7e\x15\x89"
+ "\x20\xb7\x2b\xc2\x59\xf0\x64\xfb"
+ "\x92\x06\x9d\x34\xcb\x3f\xd6\x6d"
+ "\x04\x78\x0f\xa6\x1a\xb1\x48\xdf"
+ "\x53\xea\x81\x18\x8c\x23\xba\x2e"
+ "\xc5\x5c\xf3\x67\xfe\x95\x09\xa0"
+ "\x37\xce\x42\xd9\x70\x07\x7b\x12"
+ "\xa9\x1d\xb4\x4b\xe2\x56\xed\x84"
+ "\x1b\x8f\x26\xbd\x31\xc8\x5f\xf6"
+ "\x6a\x01\x98\x0c\xa3\x3a\xd1\x45"
+ "\xdc\x73\x0a\x7e\x15\xac\x20\xb7"
+ "\x4e\xe5\x59\xf0\x87\x1e\x92\x29"
+ "\xc0\x34\xcb\x62\xf9\x6d\x04\x9b"
+ "\x0f\xa6\x3d\xd4\x48\xdf\x76\x0d"
+ "\x81\x18\xaf\x23\xba\x51\xe8\x5c"
+ "\xf3\x8a\x21\x95\x2c\xc3\x37\xce"
+ "\x65\xfc\x70\x07\x9e\x12\xa9\x40"
+ "\xd7\x4b\xe2\x79\x10\x84\x1b\xb2"
+ "\x26\xbd\x54\xeb\x5f\xf6\x8d\x01"
+ "\x98\x2f\xc6\x3a\xd1\x68\xff\x73"
+ "\x0a\xa1\x15\xac\x43\xda\x4e\xe5"
+ "\x7c\x13\x87\x1e\xb5\x29\xc0\x57"
+ "\xee\x62\xf9\x90\x04\x9b\x32\xc9"
+ "\x3d\xd4\x6b\x02\x76\x0d\xa4\x18"
+ "\xaf\x46\xdd\x51\xe8\x7f\x16\x8a"
+ "\x21\xb8\x2c\xc3\x5a\xf1\x65\xfc"
+ "\x93\x07\x9e\x35\xcc\x40\xd7\x6e"
+ "\x05\x79\x10\xa7\x1b\xb2\x49\xe0"
+ "\x54\xeb\x82\x19\x8d\x24\xbb\x2f"
+ "\xc6\x5d\xf4\x68\xff\x96\x0a\xa1"
+ "\x38\xcf\x43\xda\x71\x08\x7c\x13"
+ "\xaa\x1e\xb5\x4c\xe3\x57\xee\x85"
+ "\x1c\x90\x27\xbe\x32\xc9\x60\xf7"
+ "\x6b\x02\x99\x0d\xa4\x3b\xd2\x46"
+ "\xdd\x74\x0b\x7f\x16\xad\x21\xb8"
+ "\x4f\xe6\x5a\xf1\x88\x1f\x93\x2a"
+ "\xc1\x35\xcc\x63\xfa\x6e\x05\x9c"
+ "\x10\xa7\x3e\xd5\x49\xe0\x77\x0e"
+ "\x82\x19\xb0\x24\xbb\x52\xe9\x5d"
+ "\xf4\x8b\x22\x96\x2d\xc4\x38\xcf"
+ "\x66\xfd\x71\x08\x9f\x13\xaa\x41"
+ "\xd8\x4c\xe3\x7a\x11\x85\x1c\xb3"
+ "\x27\xbe\x55\xec\x60\xf7\x8e\x02"
+ "\x99\x30\xc7\x3b\xd2\x69\x00\x74"
+ "\x0b\xa2\x16\xad\x44\xdb\x4f\xe6"
+ "\x7d\x14\x88\x1f\xb6\x2a\xc1\x58"
+ "\xef\x63\xfa\x91\x05\x9c\x33\xca"
+ "\x3e\xd5\x6c\x03\x77\x0e\xa5\x19"
+ "\xb0\x47\xde\x52\xe9\x80\x17\x8b"
+ "\x22\xb9\x2d\xc4\x5b\xf2\x66\xfd"
+ "\x94\x08\x9f\x36\xcd\x41\xd8\x6f"
+ "\x06\x7a\x11\xa8\x1c\xb3\x4a\xe1"
+ "\x55\xec\x83\x1a\x8e\x25\xbc\x30"
+ "\xc7\x5e\xf5\x69\x00\x97\x0b\xa2"
+ "\x39\xd0\x44\xdb\x72\x09\x7d\x14"
+ "\xab\x1f\xb6\x4d\xe4\x58\xef\x86"
+ "\x1d\x91\x28\xbf\x33\xca\x61\xf8"
+ "\x6c\x03\x9a\x0e\xa5\x3c\xd3\x47"
+ "\xde\x75\x0c\x80\x17\xae\x22\xb9"
+ "\x50\xe7\x5b\xf2\x89\x20\x94\x2b"
+ "\xc2\x36\xcd\x64\xfb\x6f\x06\x9d"
+ "\x11\xa8\x3f\xd6\x4a\xe1\x78\x0f"
+ "\x83\x1a\xb1\x25\xbc\x53\xea\x5e"
+ "\xf5\x8c\x00\x97\x2e\xc5\x39\xd0"
+ "\x67\xfe\x72\x09\xa0\x14\xab\x42"
+ "\xd9\x4d\xe4\x7b\x12\x86\x1d\xb4"
+ "\x28\xbf\x56\xed\x61\xf8\x8f\x03"
+ "\x9a\x31\xc8\x3c\xd3\x6a\x01\x75"
+ "\x0c\xa3\x17\xae\x45\xdc\x50\xe7"
+ "\x7e\x15\x89\x20\xb7\x2b\xc2\x59"
+ "\xf0\x64\xfb\x92\x06\x9d\x34\xcb"
+ "\x3f\xd6\x6d\x04\x78\x0f\xa6\x1a"
+ "\xb1\x48\xdf\x53\xea\x81\x18\x8c"
+ "\x23\xba\x2e\xc5\x5c\xf3\x67\xfe"
+ "\x95\x09\xa0\x37\xce\x42\xd9\x70"
+ "\x07\x7b\x12\xa9\x1d\xb4\x4b\xe2"
+ "\x56\xed\x84\x1b\x8f\x26\xbd\x31"
+ "\xc8\x5f\xf6\x6a\x01\x98\x0c\xa3"
+ "\x3a\xd1\x45\xdc\x73\x0a\x7e\x15"
+ "\xac\x20\xb7\x4e\xe5\x59\xf0\x87"
+ "\x1e\x92\x29\xc0\x34\xcb\x62\xf9"
+ "\x6d\x04\x9b\x0f\xa6\x3d\xd4\x48"
+ "\xdf\x76\x0d\x81\x18\xaf\x23\xba"
+ "\x51\xe8\x5c\xf3\x8a\x21\x95\x2c"
+ "\xc3\x37\xce\x65\xfc\x70\x07\x9e"
+ "\x12\xa9\x40\xd7\x4b\xe2\x79\x10"
+ "\x84\x1b\xb2\x26\xbd\x54\xeb\x5f"
+ "\xf6\x8d\x01\x98\x2f\xc6\x3a\xd1"
+ "\x68\xff\x73\x0a\xa1\x15\xac\x43"
+ "\xda\x4e\xe5\x7c\x13\x87\x1e\xb5"
+ "\x29\xc0\x57\xee\x62\xf9\x90\x04"
+ "\x9b\x32\xc9\x3d\xd4\x6b\x02\x76"
+ "\x0d\xa4\x18\xaf\x46\xdd\x51\xe8"
+ "\x7f\x16\x8a\x21\xb8\x2c\xc3\x5a"
+ "\xf1\x65\xfc\x93\x07\x9e\x35\xcc"
+ "\x40\xd7\x6e\x05\x79\x10\xa7\x1b"
+ "\xb2\x49\xe0\x54\xeb\x82\x19\x8d"
+ "\x24\xbb\x2f\xc6\x5d\xf4\x68\xff"
+ "\x96\x0a\xa1\x38\xcf\x43\xda\x71"
+ "\x08\x7c\x13\xaa\x1e\xb5\x4c",
+ .psize = 1023,
+ .digest = "\x7d\x0f\x2f\xb7\x65\x3b\xa7\x26"
+ "\xc3\x88\x20\x71\x15\x06\xe8\x2d"
+ "\xa3\x92\x44\xab\x3e\xe7\xff\x86"
+ "\xb6\x79\x10\x72",
},
};
@@ -1077,6 +1213,142 @@ static const struct hash_testvec sha3_256_tv_template[] = {
"\x49\x10\x03\x76\xa8\x23\x5e\x2c"
"\x82\xe1\xb9\x99\x8a\x99\x9e\x21"
"\xdb\x32\xdd\x97\x49\x6d\x33\x76",
+ .np = 2,
+ .tap = { 28, 28 },
+ }, {
+ .plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3"
+ "\x7a\x11\x85\x1c\xb3\x27\xbe\x55"
+ "\xec\x60\xf7\x8e\x02\x99\x30\xc7"
+ "\x3b\xd2\x69\x00\x74\x0b\xa2\x16"
+ "\xad\x44\xdb\x4f\xe6\x7d\x14\x88"
+ "\x1f\xb6\x2a\xc1\x58\xef\x63\xfa"
+ "\x91\x05\x9c\x33\xca\x3e\xd5\x6c"
+ "\x03\x77\x0e\xa5\x19\xb0\x47\xde"
+ "\x52\xe9\x80\x17\x8b\x22\xb9\x2d"
+ "\xc4\x5b\xf2\x66\xfd\x94\x08\x9f"
+ "\x36\xcd\x41\xd8\x6f\x06\x7a\x11"
+ "\xa8\x1c\xb3\x4a\xe1\x55\xec\x83"
+ "\x1a\x8e\x25\xbc\x30\xc7\x5e\xf5"
+ "\x69\x00\x97\x0b\xa2\x39\xd0\x44"
+ "\xdb\x72\x09\x7d\x14\xab\x1f\xb6"
+ "\x4d\xe4\x58\xef\x86\x1d\x91\x28"
+ "\xbf\x33\xca\x61\xf8\x6c\x03\x9a"
+ "\x0e\xa5\x3c\xd3\x47\xde\x75\x0c"
+ "\x80\x17\xae\x22\xb9\x50\xe7\x5b"
+ "\xf2\x89\x20\x94\x2b\xc2\x36\xcd"
+ "\x64\xfb\x6f\x06\x9d\x11\xa8\x3f"
+ "\xd6\x4a\xe1\x78\x0f\x83\x1a\xb1"
+ "\x25\xbc\x53\xea\x5e\xf5\x8c\x00"
+ "\x97\x2e\xc5\x39\xd0\x67\xfe\x72"
+ "\x09\xa0\x14\xab\x42\xd9\x4d\xe4"
+ "\x7b\x12\x86\x1d\xb4\x28\xbf\x56"
+ "\xed\x61\xf8\x8f\x03\x9a\x31\xc8"
+ "\x3c\xd3\x6a\x01\x75\x0c\xa3\x17"
+ "\xae\x45\xdc\x50\xe7\x7e\x15\x89"
+ "\x20\xb7\x2b\xc2\x59\xf0\x64\xfb"
+ "\x92\x06\x9d\x34\xcb\x3f\xd6\x6d"
+ "\x04\x78\x0f\xa6\x1a\xb1\x48\xdf"
+ "\x53\xea\x81\x18\x8c\x23\xba\x2e"
+ "\xc5\x5c\xf3\x67\xfe\x95\x09\xa0"
+ "\x37\xce\x42\xd9\x70\x07\x7b\x12"
+ "\xa9\x1d\xb4\x4b\xe2\x56\xed\x84"
+ "\x1b\x8f\x26\xbd\x31\xc8\x5f\xf6"
+ "\x6a\x01\x98\x0c\xa3\x3a\xd1\x45"
+ "\xdc\x73\x0a\x7e\x15\xac\x20\xb7"
+ "\x4e\xe5\x59\xf0\x87\x1e\x92\x29"
+ "\xc0\x34\xcb\x62\xf9\x6d\x04\x9b"
+ "\x0f\xa6\x3d\xd4\x48\xdf\x76\x0d"
+ "\x81\x18\xaf\x23\xba\x51\xe8\x5c"
+ "\xf3\x8a\x21\x95\x2c\xc3\x37\xce"
+ "\x65\xfc\x70\x07\x9e\x12\xa9\x40"
+ "\xd7\x4b\xe2\x79\x10\x84\x1b\xb2"
+ "\x26\xbd\x54\xeb\x5f\xf6\x8d\x01"
+ "\x98\x2f\xc6\x3a\xd1\x68\xff\x73"
+ "\x0a\xa1\x15\xac\x43\xda\x4e\xe5"
+ "\x7c\x13\x87\x1e\xb5\x29\xc0\x57"
+ "\xee\x62\xf9\x90\x04\x9b\x32\xc9"
+ "\x3d\xd4\x6b\x02\x76\x0d\xa4\x18"
+ "\xaf\x46\xdd\x51\xe8\x7f\x16\x8a"
+ "\x21\xb8\x2c\xc3\x5a\xf1\x65\xfc"
+ "\x93\x07\x9e\x35\xcc\x40\xd7\x6e"
+ "\x05\x79\x10\xa7\x1b\xb2\x49\xe0"
+ "\x54\xeb\x82\x19\x8d\x24\xbb\x2f"
+ "\xc6\x5d\xf4\x68\xff\x96\x0a\xa1"
+ "\x38\xcf\x43\xda\x71\x08\x7c\x13"
+ "\xaa\x1e\xb5\x4c\xe3\x57\xee\x85"
+ "\x1c\x90\x27\xbe\x32\xc9\x60\xf7"
+ "\x6b\x02\x99\x0d\xa4\x3b\xd2\x46"
+ "\xdd\x74\x0b\x7f\x16\xad\x21\xb8"
+ "\x4f\xe6\x5a\xf1\x88\x1f\x93\x2a"
+ "\xc1\x35\xcc\x63\xfa\x6e\x05\x9c"
+ "\x10\xa7\x3e\xd5\x49\xe0\x77\x0e"
+ "\x82\x19\xb0\x24\xbb\x52\xe9\x5d"
+ "\xf4\x8b\x22\x96\x2d\xc4\x38\xcf"
+ "\x66\xfd\x71\x08\x9f\x13\xaa\x41"
+ "\xd8\x4c\xe3\x7a\x11\x85\x1c\xb3"
+ "\x27\xbe\x55\xec\x60\xf7\x8e\x02"
+ "\x99\x30\xc7\x3b\xd2\x69\x00\x74"
+ "\x0b\xa2\x16\xad\x44\xdb\x4f\xe6"
+ "\x7d\x14\x88\x1f\xb6\x2a\xc1\x58"
+ "\xef\x63\xfa\x91\x05\x9c\x33\xca"
+ "\x3e\xd5\x6c\x03\x77\x0e\xa5\x19"
+ "\xb0\x47\xde\x52\xe9\x80\x17\x8b"
+ "\x22\xb9\x2d\xc4\x5b\xf2\x66\xfd"
+ "\x94\x08\x9f\x36\xcd\x41\xd8\x6f"
+ "\x06\x7a\x11\xa8\x1c\xb3\x4a\xe1"
+ "\x55\xec\x83\x1a\x8e\x25\xbc\x30"
+ "\xc7\x5e\xf5\x69\x00\x97\x0b\xa2"
+ "\x39\xd0\x44\xdb\x72\x09\x7d\x14"
+ "\xab\x1f\xb6\x4d\xe4\x58\xef\x86"
+ "\x1d\x91\x28\xbf\x33\xca\x61\xf8"
+ "\x6c\x03\x9a\x0e\xa5\x3c\xd3\x47"
+ "\xde\x75\x0c\x80\x17\xae\x22\xb9"
+ "\x50\xe7\x5b\xf2\x89\x20\x94\x2b"
+ "\xc2\x36\xcd\x64\xfb\x6f\x06\x9d"
+ "\x11\xa8\x3f\xd6\x4a\xe1\x78\x0f"
+ "\x83\x1a\xb1\x25\xbc\x53\xea\x5e"
+ "\xf5\x8c\x00\x97\x2e\xc5\x39\xd0"
+ "\x67\xfe\x72\x09\xa0\x14\xab\x42"
+ "\xd9\x4d\xe4\x7b\x12\x86\x1d\xb4"
+ "\x28\xbf\x56\xed\x61\xf8\x8f\x03"
+ "\x9a\x31\xc8\x3c\xd3\x6a\x01\x75"
+ "\x0c\xa3\x17\xae\x45\xdc\x50\xe7"
+ "\x7e\x15\x89\x20\xb7\x2b\xc2\x59"
+ "\xf0\x64\xfb\x92\x06\x9d\x34\xcb"
+ "\x3f\xd6\x6d\x04\x78\x0f\xa6\x1a"
+ "\xb1\x48\xdf\x53\xea\x81\x18\x8c"
+ "\x23\xba\x2e\xc5\x5c\xf3\x67\xfe"
+ "\x95\x09\xa0\x37\xce\x42\xd9\x70"
+ "\x07\x7b\x12\xa9\x1d\xb4\x4b\xe2"
+ "\x56\xed\x84\x1b\x8f\x26\xbd\x31"
+ "\xc8\x5f\xf6\x6a\x01\x98\x0c\xa3"
+ "\x3a\xd1\x45\xdc\x73\x0a\x7e\x15"
+ "\xac\x20\xb7\x4e\xe5\x59\xf0\x87"
+ "\x1e\x92\x29\xc0\x34\xcb\x62\xf9"
+ "\x6d\x04\x9b\x0f\xa6\x3d\xd4\x48"
+ "\xdf\x76\x0d\x81\x18\xaf\x23\xba"
+ "\x51\xe8\x5c\xf3\x8a\x21\x95\x2c"
+ "\xc3\x37\xce\x65\xfc\x70\x07\x9e"
+ "\x12\xa9\x40\xd7\x4b\xe2\x79\x10"
+ "\x84\x1b\xb2\x26\xbd\x54\xeb\x5f"
+ "\xf6\x8d\x01\x98\x2f\xc6\x3a\xd1"
+ "\x68\xff\x73\x0a\xa1\x15\xac\x43"
+ "\xda\x4e\xe5\x7c\x13\x87\x1e\xb5"
+ "\x29\xc0\x57\xee\x62\xf9\x90\x04"
+ "\x9b\x32\xc9\x3d\xd4\x6b\x02\x76"
+ "\x0d\xa4\x18\xaf\x46\xdd\x51\xe8"
+ "\x7f\x16\x8a\x21\xb8\x2c\xc3\x5a"
+ "\xf1\x65\xfc\x93\x07\x9e\x35\xcc"
+ "\x40\xd7\x6e\x05\x79\x10\xa7\x1b"
+ "\xb2\x49\xe0\x54\xeb\x82\x19\x8d"
+ "\x24\xbb\x2f\xc6\x5d\xf4\x68\xff"
+ "\x96\x0a\xa1\x38\xcf\x43\xda\x71"
+ "\x08\x7c\x13\xaa\x1e\xb5\x4c",
+ .psize = 1023,
+ .digest = "\xde\x41\x04\xbd\xda\xda\xd9\x71"
+ "\xf7\xfa\x80\xf5\xea\x11\x03\xb1"
+ "\x3b\x6a\xbc\x5f\xb9\x66\x26\xf7"
+ "\x8a\x97\xbb\xf2\x07\x08\x38\x30",
},
};
@@ -1109,6 +1381,144 @@ static const struct hash_testvec sha3_384_tv_template[] = {
"\x9b\xfd\xbc\x32\xb9\xd4\xad\x5a"
"\xa0\x4a\x1f\x07\x6e\x62\xfe\xa1"
"\x9e\xef\x51\xac\xd0\x65\x7c\x22",
+ .np = 2,
+ .tap = { 28, 28 },
+ }, {
+ .plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3"
+ "\x7a\x11\x85\x1c\xb3\x27\xbe\x55"
+ "\xec\x60\xf7\x8e\x02\x99\x30\xc7"
+ "\x3b\xd2\x69\x00\x74\x0b\xa2\x16"
+ "\xad\x44\xdb\x4f\xe6\x7d\x14\x88"
+ "\x1f\xb6\x2a\xc1\x58\xef\x63\xfa"
+ "\x91\x05\x9c\x33\xca\x3e\xd5\x6c"
+ "\x03\x77\x0e\xa5\x19\xb0\x47\xde"
+ "\x52\xe9\x80\x17\x8b\x22\xb9\x2d"
+ "\xc4\x5b\xf2\x66\xfd\x94\x08\x9f"
+ "\x36\xcd\x41\xd8\x6f\x06\x7a\x11"
+ "\xa8\x1c\xb3\x4a\xe1\x55\xec\x83"
+ "\x1a\x8e\x25\xbc\x30\xc7\x5e\xf5"
+ "\x69\x00\x97\x0b\xa2\x39\xd0\x44"
+ "\xdb\x72\x09\x7d\x14\xab\x1f\xb6"
+ "\x4d\xe4\x58\xef\x86\x1d\x91\x28"
+ "\xbf\x33\xca\x61\xf8\x6c\x03\x9a"
+ "\x0e\xa5\x3c\xd3\x47\xde\x75\x0c"
+ "\x80\x17\xae\x22\xb9\x50\xe7\x5b"
+ "\xf2\x89\x20\x94\x2b\xc2\x36\xcd"
+ "\x64\xfb\x6f\x06\x9d\x11\xa8\x3f"
+ "\xd6\x4a\xe1\x78\x0f\x83\x1a\xb1"
+ "\x25\xbc\x53\xea\x5e\xf5\x8c\x00"
+ "\x97\x2e\xc5\x39\xd0\x67\xfe\x72"
+ "\x09\xa0\x14\xab\x42\xd9\x4d\xe4"
+ "\x7b\x12\x86\x1d\xb4\x28\xbf\x56"
+ "\xed\x61\xf8\x8f\x03\x9a\x31\xc8"
+ "\x3c\xd3\x6a\x01\x75\x0c\xa3\x17"
+ "\xae\x45\xdc\x50\xe7\x7e\x15\x89"
+ "\x20\xb7\x2b\xc2\x59\xf0\x64\xfb"
+ "\x92\x06\x9d\x34\xcb\x3f\xd6\x6d"
+ "\x04\x78\x0f\xa6\x1a\xb1\x48\xdf"
+ "\x53\xea\x81\x18\x8c\x23\xba\x2e"
+ "\xc5\x5c\xf3\x67\xfe\x95\x09\xa0"
+ "\x37\xce\x42\xd9\x70\x07\x7b\x12"
+ "\xa9\x1d\xb4\x4b\xe2\x56\xed\x84"
+ "\x1b\x8f\x26\xbd\x31\xc8\x5f\xf6"
+ "\x6a\x01\x98\x0c\xa3\x3a\xd1\x45"
+ "\xdc\x73\x0a\x7e\x15\xac\x20\xb7"
+ "\x4e\xe5\x59\xf0\x87\x1e\x92\x29"
+ "\xc0\x34\xcb\x62\xf9\x6d\x04\x9b"
+ "\x0f\xa6\x3d\xd4\x48\xdf\x76\x0d"
+ "\x81\x18\xaf\x23\xba\x51\xe8\x5c"
+ "\xf3\x8a\x21\x95\x2c\xc3\x37\xce"
+ "\x65\xfc\x70\x07\x9e\x12\xa9\x40"
+ "\xd7\x4b\xe2\x79\x10\x84\x1b\xb2"
+ "\x26\xbd\x54\xeb\x5f\xf6\x8d\x01"
+ "\x98\x2f\xc6\x3a\xd1\x68\xff\x73"
+ "\x0a\xa1\x15\xac\x43\xda\x4e\xe5"
+ "\x7c\x13\x87\x1e\xb5\x29\xc0\x57"
+ "\xee\x62\xf9\x90\x04\x9b\x32\xc9"
+ "\x3d\xd4\x6b\x02\x76\x0d\xa4\x18"
+ "\xaf\x46\xdd\x51\xe8\x7f\x16\x8a"
+ "\x21\xb8\x2c\xc3\x5a\xf1\x65\xfc"
+ "\x93\x07\x9e\x35\xcc\x40\xd7\x6e"
+ "\x05\x79\x10\xa7\x1b\xb2\x49\xe0"
+ "\x54\xeb\x82\x19\x8d\x24\xbb\x2f"
+ "\xc6\x5d\xf4\x68\xff\x96\x0a\xa1"
+ "\x38\xcf\x43\xda\x71\x08\x7c\x13"
+ "\xaa\x1e\xb5\x4c\xe3\x57\xee\x85"
+ "\x1c\x90\x27\xbe\x32\xc9\x60\xf7"
+ "\x6b\x02\x99\x0d\xa4\x3b\xd2\x46"
+ "\xdd\x74\x0b\x7f\x16\xad\x21\xb8"
+ "\x4f\xe6\x5a\xf1\x88\x1f\x93\x2a"
+ "\xc1\x35\xcc\x63\xfa\x6e\x05\x9c"
+ "\x10\xa7\x3e\xd5\x49\xe0\x77\x0e"
+ "\x82\x19\xb0\x24\xbb\x52\xe9\x5d"
+ "\xf4\x8b\x22\x96\x2d\xc4\x38\xcf"
+ "\x66\xfd\x71\x08\x9f\x13\xaa\x41"
+ "\xd8\x4c\xe3\x7a\x11\x85\x1c\xb3"
+ "\x27\xbe\x55\xec\x60\xf7\x8e\x02"
+ "\x99\x30\xc7\x3b\xd2\x69\x00\x74"
+ "\x0b\xa2\x16\xad\x44\xdb\x4f\xe6"
+ "\x7d\x14\x88\x1f\xb6\x2a\xc1\x58"
+ "\xef\x63\xfa\x91\x05\x9c\x33\xca"
+ "\x3e\xd5\x6c\x03\x77\x0e\xa5\x19"
+ "\xb0\x47\xde\x52\xe9\x80\x17\x8b"
+ "\x22\xb9\x2d\xc4\x5b\xf2\x66\xfd"
+ "\x94\x08\x9f\x36\xcd\x41\xd8\x6f"
+ "\x06\x7a\x11\xa8\x1c\xb3\x4a\xe1"
+ "\x55\xec\x83\x1a\x8e\x25\xbc\x30"
+ "\xc7\x5e\xf5\x69\x00\x97\x0b\xa2"
+ "\x39\xd0\x44\xdb\x72\x09\x7d\x14"
+ "\xab\x1f\xb6\x4d\xe4\x58\xef\x86"
+ "\x1d\x91\x28\xbf\x33\xca\x61\xf8"
+ "\x6c\x03\x9a\x0e\xa5\x3c\xd3\x47"
+ "\xde\x75\x0c\x80\x17\xae\x22\xb9"
+ "\x50\xe7\x5b\xf2\x89\x20\x94\x2b"
+ "\xc2\x36\xcd\x64\xfb\x6f\x06\x9d"
+ "\x11\xa8\x3f\xd6\x4a\xe1\x78\x0f"
+ "\x83\x1a\xb1\x25\xbc\x53\xea\x5e"
+ "\xf5\x8c\x00\x97\x2e\xc5\x39\xd0"
+ "\x67\xfe\x72\x09\xa0\x14\xab\x42"
+ "\xd9\x4d\xe4\x7b\x12\x86\x1d\xb4"
+ "\x28\xbf\x56\xed\x61\xf8\x8f\x03"
+ "\x9a\x31\xc8\x3c\xd3\x6a\x01\x75"
+ "\x0c\xa3\x17\xae\x45\xdc\x50\xe7"
+ "\x7e\x15\x89\x20\xb7\x2b\xc2\x59"
+ "\xf0\x64\xfb\x92\x06\x9d\x34\xcb"
+ "\x3f\xd6\x6d\x04\x78\x0f\xa6\x1a"
+ "\xb1\x48\xdf\x53\xea\x81\x18\x8c"
+ "\x23\xba\x2e\xc5\x5c\xf3\x67\xfe"
+ "\x95\x09\xa0\x37\xce\x42\xd9\x70"
+ "\x07\x7b\x12\xa9\x1d\xb4\x4b\xe2"
+ "\x56\xed\x84\x1b\x8f\x26\xbd\x31"
+ "\xc8\x5f\xf6\x6a\x01\x98\x0c\xa3"
+ "\x3a\xd1\x45\xdc\x73\x0a\x7e\x15"
+ "\xac\x20\xb7\x4e\xe5\x59\xf0\x87"
+ "\x1e\x92\x29\xc0\x34\xcb\x62\xf9"
+ "\x6d\x04\x9b\x0f\xa6\x3d\xd4\x48"
+ "\xdf\x76\x0d\x81\x18\xaf\x23\xba"
+ "\x51\xe8\x5c\xf3\x8a\x21\x95\x2c"
+ "\xc3\x37\xce\x65\xfc\x70\x07\x9e"
+ "\x12\xa9\x40\xd7\x4b\xe2\x79\x10"
+ "\x84\x1b\xb2\x26\xbd\x54\xeb\x5f"
+ "\xf6\x8d\x01\x98\x2f\xc6\x3a\xd1"
+ "\x68\xff\x73\x0a\xa1\x15\xac\x43"
+ "\xda\x4e\xe5\x7c\x13\x87\x1e\xb5"
+ "\x29\xc0\x57\xee\x62\xf9\x90\x04"
+ "\x9b\x32\xc9\x3d\xd4\x6b\x02\x76"
+ "\x0d\xa4\x18\xaf\x46\xdd\x51\xe8"
+ "\x7f\x16\x8a\x21\xb8\x2c\xc3\x5a"
+ "\xf1\x65\xfc\x93\x07\x9e\x35\xcc"
+ "\x40\xd7\x6e\x05\x79\x10\xa7\x1b"
+ "\xb2\x49\xe0\x54\xeb\x82\x19\x8d"
+ "\x24\xbb\x2f\xc6\x5d\xf4\x68\xff"
+ "\x96\x0a\xa1\x38\xcf\x43\xda\x71"
+ "\x08\x7c\x13\xaa\x1e\xb5\x4c",
+ .psize = 1023,
+ .digest = "\x1b\x19\x4d\x8f\xd5\x36\x87\x71"
+ "\xcf\xca\x30\x85\x9b\xc1\x25\xc7"
+ "\x00\xcb\x73\x8a\x8e\xd4\xfe\x2b"
+ "\x1a\xa2\xdc\x2e\x41\xfd\x52\x51"
+ "\xd2\x21\xae\x2d\xc7\xae\x8c\x40"
+ "\xb9\xe6\x56\x48\x03\xcd\x88\x6b",
},
};
@@ -1147,6 +1557,146 @@ static const struct hash_testvec sha3_512_tv_template[] = {
"\xba\x1b\x0d\x8d\xc7\x8c\x08\x63"
"\x46\xb5\x33\xb4\x9c\x03\x0d\x99"
"\xa2\x7d\xaf\x11\x39\xd6\xe7\x5e",
+ .np = 2,
+ .tap = { 28, 28 },
+ }, {
+ .plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3"
+ "\x7a\x11\x85\x1c\xb3\x27\xbe\x55"
+ "\xec\x60\xf7\x8e\x02\x99\x30\xc7"
+ "\x3b\xd2\x69\x00\x74\x0b\xa2\x16"
+ "\xad\x44\xdb\x4f\xe6\x7d\x14\x88"
+ "\x1f\xb6\x2a\xc1\x58\xef\x63\xfa"
+ "\x91\x05\x9c\x33\xca\x3e\xd5\x6c"
+ "\x03\x77\x0e\xa5\x19\xb0\x47\xde"
+ "\x52\xe9\x80\x17\x8b\x22\xb9\x2d"
+ "\xc4\x5b\xf2\x66\xfd\x94\x08\x9f"
+ "\x36\xcd\x41\xd8\x6f\x06\x7a\x11"
+ "\xa8\x1c\xb3\x4a\xe1\x55\xec\x83"
+ "\x1a\x8e\x25\xbc\x30\xc7\x5e\xf5"
+ "\x69\x00\x97\x0b\xa2\x39\xd0\x44"
+ "\xdb\x72\x09\x7d\x14\xab\x1f\xb6"
+ "\x4d\xe4\x58\xef\x86\x1d\x91\x28"
+ "\xbf\x33\xca\x61\xf8\x6c\x03\x9a"
+ "\x0e\xa5\x3c\xd3\x47\xde\x75\x0c"
+ "\x80\x17\xae\x22\xb9\x50\xe7\x5b"
+ "\xf2\x89\x20\x94\x2b\xc2\x36\xcd"
+ "\x64\xfb\x6f\x06\x9d\x11\xa8\x3f"
+ "\xd6\x4a\xe1\x78\x0f\x83\x1a\xb1"
+ "\x25\xbc\x53\xea\x5e\xf5\x8c\x00"
+ "\x97\x2e\xc5\x39\xd0\x67\xfe\x72"
+ "\x09\xa0\x14\xab\x42\xd9\x4d\xe4"
+ "\x7b\x12\x86\x1d\xb4\x28\xbf\x56"
+ "\xed\x61\xf8\x8f\x03\x9a\x31\xc8"
+ "\x3c\xd3\x6a\x01\x75\x0c\xa3\x17"
+ "\xae\x45\xdc\x50\xe7\x7e\x15\x89"
+ "\x20\xb7\x2b\xc2\x59\xf0\x64\xfb"
+ "\x92\x06\x9d\x34\xcb\x3f\xd6\x6d"
+ "\x04\x78\x0f\xa6\x1a\xb1\x48\xdf"
+ "\x53\xea\x81\x18\x8c\x23\xba\x2e"
+ "\xc5\x5c\xf3\x67\xfe\x95\x09\xa0"
+ "\x37\xce\x42\xd9\x70\x07\x7b\x12"
+ "\xa9\x1d\xb4\x4b\xe2\x56\xed\x84"
+ "\x1b\x8f\x26\xbd\x31\xc8\x5f\xf6"
+ "\x6a\x01\x98\x0c\xa3\x3a\xd1\x45"
+ "\xdc\x73\x0a\x7e\x15\xac\x20\xb7"
+ "\x4e\xe5\x59\xf0\x87\x1e\x92\x29"
+ "\xc0\x34\xcb\x62\xf9\x6d\x04\x9b"
+ "\x0f\xa6\x3d\xd4\x48\xdf\x76\x0d"
+ "\x81\x18\xaf\x23\xba\x51\xe8\x5c"
+ "\xf3\x8a\x21\x95\x2c\xc3\x37\xce"
+ "\x65\xfc\x70\x07\x9e\x12\xa9\x40"
+ "\xd7\x4b\xe2\x79\x10\x84\x1b\xb2"
+ "\x26\xbd\x54\xeb\x5f\xf6\x8d\x01"
+ "\x98\x2f\xc6\x3a\xd1\x68\xff\x73"
+ "\x0a\xa1\x15\xac\x43\xda\x4e\xe5"
+ "\x7c\x13\x87\x1e\xb5\x29\xc0\x57"
+ "\xee\x62\xf9\x90\x04\x9b\x32\xc9"
+ "\x3d\xd4\x6b\x02\x76\x0d\xa4\x18"
+ "\xaf\x46\xdd\x51\xe8\x7f\x16\x8a"
+ "\x21\xb8\x2c\xc3\x5a\xf1\x65\xfc"
+ "\x93\x07\x9e\x35\xcc\x40\xd7\x6e"
+ "\x05\x79\x10\xa7\x1b\xb2\x49\xe0"
+ "\x54\xeb\x82\x19\x8d\x24\xbb\x2f"
+ "\xc6\x5d\xf4\x68\xff\x96\x0a\xa1"
+ "\x38\xcf\x43\xda\x71\x08\x7c\x13"
+ "\xaa\x1e\xb5\x4c\xe3\x57\xee\x85"
+ "\x1c\x90\x27\xbe\x32\xc9\x60\xf7"
+ "\x6b\x02\x99\x0d\xa4\x3b\xd2\x46"
+ "\xdd\x74\x0b\x7f\x16\xad\x21\xb8"
+ "\x4f\xe6\x5a\xf1\x88\x1f\x93\x2a"
+ "\xc1\x35\xcc\x63\xfa\x6e\x05\x9c"
+ "\x10\xa7\x3e\xd5\x49\xe0\x77\x0e"
+ "\x82\x19\xb0\x24\xbb\x52\xe9\x5d"
+ "\xf4\x8b\x22\x96\x2d\xc4\x38\xcf"
+ "\x66\xfd\x71\x08\x9f\x13\xaa\x41"
+ "\xd8\x4c\xe3\x7a\x11\x85\x1c\xb3"
+ "\x27\xbe\x55\xec\x60\xf7\x8e\x02"
+ "\x99\x30\xc7\x3b\xd2\x69\x00\x74"
+ "\x0b\xa2\x16\xad\x44\xdb\x4f\xe6"
+ "\x7d\x14\x88\x1f\xb6\x2a\xc1\x58"
+ "\xef\x63\xfa\x91\x05\x9c\x33\xca"
+ "\x3e\xd5\x6c\x03\x77\x0e\xa5\x19"
+ "\xb0\x47\xde\x52\xe9\x80\x17\x8b"
+ "\x22\xb9\x2d\xc4\x5b\xf2\x66\xfd"
+ "\x94\x08\x9f\x36\xcd\x41\xd8\x6f"
+ "\x06\x7a\x11\xa8\x1c\xb3\x4a\xe1"
+ "\x55\xec\x83\x1a\x8e\x25\xbc\x30"
+ "\xc7\x5e\xf5\x69\x00\x97\x0b\xa2"
+ "\x39\xd0\x44\xdb\x72\x09\x7d\x14"
+ "\xab\x1f\xb6\x4d\xe4\x58\xef\x86"
+ "\x1d\x91\x28\xbf\x33\xca\x61\xf8"
+ "\x6c\x03\x9a\x0e\xa5\x3c\xd3\x47"
+ "\xde\x75\x0c\x80\x17\xae\x22\xb9"
+ "\x50\xe7\x5b\xf2\x89\x20\x94\x2b"
+ "\xc2\x36\xcd\x64\xfb\x6f\x06\x9d"
+ "\x11\xa8\x3f\xd6\x4a\xe1\x78\x0f"
+ "\x83\x1a\xb1\x25\xbc\x53\xea\x5e"
+ "\xf5\x8c\x00\x97\x2e\xc5\x39\xd0"
+ "\x67\xfe\x72\x09\xa0\x14\xab\x42"
+ "\xd9\x4d\xe4\x7b\x12\x86\x1d\xb4"
+ "\x28\xbf\x56\xed\x61\xf8\x8f\x03"
+ "\x9a\x31\xc8\x3c\xd3\x6a\x01\x75"
+ "\x0c\xa3\x17\xae\x45\xdc\x50\xe7"
+ "\x7e\x15\x89\x20\xb7\x2b\xc2\x59"
+ "\xf0\x64\xfb\x92\x06\x9d\x34\xcb"
+ "\x3f\xd6\x6d\x04\x78\x0f\xa6\x1a"
+ "\xb1\x48\xdf\x53\xea\x81\x18\x8c"
+ "\x23\xba\x2e\xc5\x5c\xf3\x67\xfe"
+ "\x95\x09\xa0\x37\xce\x42\xd9\x70"
+ "\x07\x7b\x12\xa9\x1d\xb4\x4b\xe2"
+ "\x56\xed\x84\x1b\x8f\x26\xbd\x31"
+ "\xc8\x5f\xf6\x6a\x01\x98\x0c\xa3"
+ "\x3a\xd1\x45\xdc\x73\x0a\x7e\x15"
+ "\xac\x20\xb7\x4e\xe5\x59\xf0\x87"
+ "\x1e\x92\x29\xc0\x34\xcb\x62\xf9"
+ "\x6d\x04\x9b\x0f\xa6\x3d\xd4\x48"
+ "\xdf\x76\x0d\x81\x18\xaf\x23\xba"
+ "\x51\xe8\x5c\xf3\x8a\x21\x95\x2c"
+ "\xc3\x37\xce\x65\xfc\x70\x07\x9e"
+ "\x12\xa9\x40\xd7\x4b\xe2\x79\x10"
+ "\x84\x1b\xb2\x26\xbd\x54\xeb\x5f"
+ "\xf6\x8d\x01\x98\x2f\xc6\x3a\xd1"
+ "\x68\xff\x73\x0a\xa1\x15\xac\x43"
+ "\xda\x4e\xe5\x7c\x13\x87\x1e\xb5"
+ "\x29\xc0\x57\xee\x62\xf9\x90\x04"
+ "\x9b\x32\xc9\x3d\xd4\x6b\x02\x76"
+ "\x0d\xa4\x18\xaf\x46\xdd\x51\xe8"
+ "\x7f\x16\x8a\x21\xb8\x2c\xc3\x5a"
+ "\xf1\x65\xfc\x93\x07\x9e\x35\xcc"
+ "\x40\xd7\x6e\x05\x79\x10\xa7\x1b"
+ "\xb2\x49\xe0\x54\xeb\x82\x19\x8d"
+ "\x24\xbb\x2f\xc6\x5d\xf4\x68\xff"
+ "\x96\x0a\xa1\x38\xcf\x43\xda\x71"
+ "\x08\x7c\x13\xaa\x1e\xb5\x4c",
+ .psize = 1023,
+ .digest = "\x59\xda\x30\xe3\x90\xe4\x3d\xde"
+ "\xf0\xc6\x42\x17\xd7\xb2\x26\x47"
+ "\x90\x28\xa6\x84\xe8\x49\x7a\x86"
+ "\xd6\xb8\x9e\xf8\x07\x59\x21\x03"
+ "\xad\xd2\xed\x48\xa3\xb9\xa5\xf0"
+ "\xb3\xae\x02\x2b\xb8\xaf\xc3\x3b"
+ "\xd6\xb0\x8f\xcb\x76\x8b\xa7\x41"
+ "\x32\xc2\x8e\x50\x91\x86\x90\xfb",
},
};
diff --git a/crypto/twofish_common.c b/crypto/twofish_common.c
index 5f62c4f9f6e0..f3a0dd25f871 100644
--- a/crypto/twofish_common.c
+++ b/crypto/twofish_common.c
@@ -24,9 +24,8 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
*
* This code is a "clean room" implementation, written from the paper
* _Twofish: A 128-Bit Block Cipher_ by Bruce Schneier, John Kelsey,
diff --git a/crypto/twofish_generic.c b/crypto/twofish_generic.c
index ebf7a3efb572..07e62433fbfb 100644
--- a/crypto/twofish_generic.c
+++ b/crypto/twofish_generic.c
@@ -23,9 +23,8 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
*
* This code is a "clean room" implementation, written from the paper
* _Twofish: A 128-Bit Block Cipher_ by Bruce Schneier, John Kelsey,
diff --git a/crypto/xcbc.c b/crypto/xcbc.c
index df90b332554c..25c75af50d3f 100644
--- a/crypto/xcbc.c
+++ b/crypto/xcbc.c
@@ -12,8 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* Author:
* Kazunori Miyazawa <miyazawa@linux-ipv6.org>
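
Note: the new sha3 testcases above feed testmgr's chunked-update path as well as the one-shot path. In a hash_testvec, .plaintext/.psize give the input and its length, .digest the expected output, and when .np is set the input is delivered to the hash in .np pieces whose sizes come from .tap[] (e.g. .np = 2, .tap = { 28, 28 } above) instead of a single update. A minimal user-space sketch of that splitting logic, with hypothetical names and a stand-in update callback rather than the kernel's actual testmgr code:

#include <stddef.h>
#include <stdio.h>

/* Mirrors the hash_testvec fields used above (names from crypto/testmgr.h). */
struct hash_testvec_sketch {
	const char *plaintext;
	unsigned short psize;
	unsigned char np;
	unsigned char tap[8];
};

/* Stand-in for the kernel's hash update call: just records chunk sizes. */
static void fake_update(const char *buf, size_t len)
{
	(void)buf;
	printf("update: %zu bytes\n", len);
}

/* Feed the vector in .np pieces of .tap[i] bytes each, the way the
 * chunked-update test does; fall back to one shot when .np is 0. */
static void run_vector(const struct hash_testvec_sketch *v)
{
	size_t off = 0;
	unsigned int i;

	if (!v->np) {
		fake_update(v->plaintext, v->psize);
		return;
	}
	for (i = 0; i < v->np; i++) {
		fake_update(v->plaintext + off, v->tap[i]);
		off += v->tap[i];
	}
}

int main(void)
{
	static char buf[56]; /* dummy payload; contents are irrelevant here */
	struct hash_testvec_sketch v = {
		.plaintext = buf, .psize = 56, .np = 2, .tap = { 28, 28 },
	};

	run_vector(&v); /* prints two 28-byte updates, matching .tap = { 28, 28 } */
	return 0;
}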