author     Linus Torvalds <torvalds@linux-foundation.org>  2016-10-11 00:04:16 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-10-11 00:04:16 +0300
commit     30066ce675d3af350bc5a53858991c0b518dda00 (patch)
tree       75db2274cd0887b11b4e297771287f0fb4c14b81 /crypto
parent     6763afe4b9f39142bda2a92d69e62fe85f67251c (diff)
parent     c3afafa47898e34eb49828ec4ac92bcdc81c8f0c (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "Here is the crypto update for 4.9:

  API:
   - The crypto engine code now supports hashes.

  Algorithms:
   - Allow keys >= 2048 bits in FIPS mode for RSA.

  Drivers:
   - Memory overwrite fix for vmx ghash.
   - Add support for building ARM sha1-neon in Thumb2 mode.
   - Reenable ARM ghash-ce code by adding import/export.
   - Reenable img-hash by adding import/export.
   - Add support for multiple cores in omap-aes.
   - Add little-endian support for sha1-powerpc.
   - Add Cavium HWRNG driver for ThunderX SoC"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (137 commits)
  crypto: caam - treat SGT address pointer as u64
  crypto: ccp - Make syslog errors human-readable
  crypto: ccp - clean up data structure
  crypto: vmx - Ensure ghash-generic is enabled
  crypto: testmgr - add guard to dst buffer for ahash_export
  crypto: caam - Unmap region obtained by of_iomap
  crypto: sha1-powerpc - little-endian support
  crypto: gcm - Fix IV buffer size in crypto_gcm_setkey
  crypto: vmx - Fix memory corruption caused by p8_ghash
  crypto: ghash-generic - move common definitions to a new header file
  crypto: caam - fix sg dump
  hwrng: omap - Only fail if pm_runtime_get_sync returns < 0
  crypto: omap-sham - shrink the internal buffer size
  crypto: omap-sham - add support for export/import
  crypto: omap-sham - convert driver logic to use sgs for data xmit
  crypto: omap-sham - change the DMA threshold value to a define
  crypto: omap-sham - add support functions for sg based data handling
  crypto: omap-sham - rename sgl to sgl_tmp for deprecation
  crypto: omap-sham - align algorithms on word offset
  crypto: omap-sham - add context export/import stubs
  ...
Diffstat (limited to 'crypto')
-rw-r--r--  crypto/algif_hash.c         |  73
-rw-r--r--  crypto/crct10dif_generic.c  |   5
-rw-r--r--  crypto/crypto_engine.c      | 187
-rw-r--r--  crypto/drbg.c               |  31
-rw-r--r--  crypto/gcm.c                |   2
-rw-r--r--  crypto/ghash-generic.c      |  13
-rw-r--r--  crypto/mcryptd.c            |   7
-rw-r--r--  crypto/rsa_helper.c         |   4
-rw-r--r--  crypto/testmgr.c            |  24
-rw-r--r--  crypto/testmgr.h            |   4
-rw-r--r--  crypto/xor.c                |  41
-rw-r--r--  crypto/xts.c                |   2
12 files changed, 274 insertions(+), 119 deletions(-)
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 68a5ceaa04c8..2d8466f9e49b 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -39,6 +39,37 @@ struct algif_hash_tfm {
bool has_key;
};
+static int hash_alloc_result(struct sock *sk, struct hash_ctx *ctx)
+{
+ unsigned ds;
+
+ if (ctx->result)
+ return 0;
+
+ ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req));
+
+ ctx->result = sock_kmalloc(sk, ds, GFP_KERNEL);
+ if (!ctx->result)
+ return -ENOMEM;
+
+ memset(ctx->result, 0, ds);
+
+ return 0;
+}
+
+static void hash_free_result(struct sock *sk, struct hash_ctx *ctx)
+{
+ unsigned ds;
+
+ if (!ctx->result)
+ return;
+
+ ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req));
+
+ sock_kzfree_s(sk, ctx->result, ds);
+ ctx->result = NULL;
+}
+
static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
size_t ignored)
{
@@ -54,6 +85,9 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
lock_sock(sk);
if (!ctx->more) {
+ if ((msg->msg_flags & MSG_MORE))
+ hash_free_result(sk, ctx);
+
err = af_alg_wait_for_completion(crypto_ahash_init(&ctx->req),
&ctx->completion);
if (err)
@@ -90,6 +124,10 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
ctx->more = msg->msg_flags & MSG_MORE;
if (!ctx->more) {
+ err = hash_alloc_result(sk, ctx);
+ if (err)
+ goto unlock;
+
ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
&ctx->completion);
@@ -116,6 +154,13 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
sg_init_table(ctx->sgl.sg, 1);
sg_set_page(ctx->sgl.sg, page, size, offset);
+ if (!(flags & MSG_MORE)) {
+ err = hash_alloc_result(sk, ctx);
+ if (err)
+ goto unlock;
+ } else if (!ctx->more)
+ hash_free_result(sk, ctx);
+
ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, ctx->result, size);
if (!(flags & MSG_MORE)) {
@@ -153,6 +198,7 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
struct alg_sock *ask = alg_sk(sk);
struct hash_ctx *ctx = ask->private;
unsigned ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req));
+ bool result;
int err;
if (len > ds)
@@ -161,17 +207,29 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
msg->msg_flags |= MSG_TRUNC;
lock_sock(sk);
+ result = ctx->result;
+ err = hash_alloc_result(sk, ctx);
+ if (err)
+ goto unlock;
+
+ ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
+
if (ctx->more) {
ctx->more = 0;
- ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
&ctx->completion);
if (err)
goto unlock;
+ } else if (!result) {
+ err = af_alg_wait_for_completion(
+ crypto_ahash_digest(&ctx->req),
+ &ctx->completion);
}
err = memcpy_to_msg(msg, ctx->result, len);
+ hash_free_result(sk, ctx);
+
unlock:
release_sock(sk);
@@ -394,8 +452,7 @@ static void hash_sock_destruct(struct sock *sk)
struct alg_sock *ask = alg_sk(sk);
struct hash_ctx *ctx = ask->private;
- sock_kzfree_s(sk, ctx->result,
- crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req)));
+ hash_free_result(sk, ctx);
sock_kfree_s(sk, ctx, ctx->len);
af_alg_release_parent(sk);
}
@@ -407,20 +464,12 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk)
struct algif_hash_tfm *tfm = private;
struct crypto_ahash *hash = tfm->hash;
unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(hash);
- unsigned ds = crypto_ahash_digestsize(hash);
ctx = sock_kmalloc(sk, len, GFP_KERNEL);
if (!ctx)
return -ENOMEM;
- ctx->result = sock_kmalloc(sk, ds, GFP_KERNEL);
- if (!ctx->result) {
- sock_kfree_s(sk, ctx, len);
- return -ENOMEM;
- }
-
- memset(ctx->result, 0, ds);
-
+ ctx->result = NULL;
ctx->len = len;
ctx->more = 0;
af_alg_init_completion(&ctx->completion);
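For context, here is a minimal userspace sketch (not part of the patch) of the AF_ALG hash socket that algif_hash.c implements; with the change above, the kernel defers allocating ctx->result until the final block is sent or the digest is read. Error handling is omitted for brevity.

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <linux/if_alg.h>

	int main(void)
	{
		struct sockaddr_alg sa = {
			.salg_family = AF_ALG,
			.salg_type   = "hash",
			.salg_name   = "sha256",
		};
		unsigned char digest[32];
		int tfmfd, opfd, i;

		tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
		bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
		opfd = accept(tfmfd, NULL, 0);

		/* MSG_MORE keeps ctx->more set; no result buffer needed yet */
		send(opfd, "hello ", 6, MSG_MORE);
		/* final chunk: hash_sendmsg() calls hash_alloc_result() */
		send(opfd, "world", 5, 0);

		read(opfd, digest, sizeof(digest));	/* hash_recvmsg() path */
		for (i = 0; i < 32; i++)
			printf("%02x", digest[i]);
		printf("\n");
		close(opfd);
		close(tfmfd);
		return 0;
	}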
diff --git a/crypto/crct10dif_generic.c b/crypto/crct10dif_generic.c
index c1229614c7e3..8e94e29dc6fc 100644
--- a/crypto/crct10dif_generic.c
+++ b/crypto/crct10dif_generic.c
@@ -107,10 +107,7 @@ static struct shash_alg alg = {
static int __init crct10dif_mod_init(void)
{
- int ret;
-
- ret = crypto_register_shash(&alg);
- return ret;
+ return crypto_register_shash(&alg);
}
static void __exit crct10dif_mod_fini(void)
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
index a55c82dd48ef..bfb92ace2c91 100644
--- a/crypto/crypto_engine.c
+++ b/crypto/crypto_engine.c
@@ -14,13 +14,12 @@
#include <linux/err.h>
#include <linux/delay.h>
+#include <crypto/engine.h>
+#include <crypto/internal/hash.h>
#include "internal.h"
#define CRYPTO_ENGINE_MAX_QLEN 10
-void crypto_finalize_request(struct crypto_engine *engine,
- struct ablkcipher_request *req, int err);
-
/**
* crypto_pump_requests - dequeue one request from engine queue to process
* @engine: the hardware engine
@@ -34,10 +33,11 @@ static void crypto_pump_requests(struct crypto_engine *engine,
bool in_kthread)
{
struct crypto_async_request *async_req, *backlog;
- struct ablkcipher_request *req;
+ struct ahash_request *hreq;
+ struct ablkcipher_request *breq;
unsigned long flags;
bool was_busy = false;
- int ret;
+ int ret, rtype;
spin_lock_irqsave(&engine->queue_lock, flags);
@@ -82,9 +82,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
if (!async_req)
goto out;
- req = ablkcipher_request_cast(async_req);
-
- engine->cur_req = req;
+ engine->cur_req = async_req;
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
@@ -95,6 +93,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
spin_unlock_irqrestore(&engine->queue_lock, flags);
+ rtype = crypto_tfm_alg_type(engine->cur_req->tfm);
/* Until here we get the request need to be encrypted successfully */
if (!was_busy && engine->prepare_crypt_hardware) {
ret = engine->prepare_crypt_hardware(engine);
@@ -104,24 +103,55 @@ static void crypto_pump_requests(struct crypto_engine *engine,
}
}
- if (engine->prepare_request) {
- ret = engine->prepare_request(engine, engine->cur_req);
+ switch (rtype) {
+ case CRYPTO_ALG_TYPE_AHASH:
+ hreq = ahash_request_cast(engine->cur_req);
+ if (engine->prepare_hash_request) {
+ ret = engine->prepare_hash_request(engine, hreq);
+ if (ret) {
+ pr_err("failed to prepare request: %d\n", ret);
+ goto req_err;
+ }
+ engine->cur_req_prepared = true;
+ }
+ ret = engine->hash_one_request(engine, hreq);
if (ret) {
- pr_err("failed to prepare request: %d\n", ret);
+ pr_err("failed to hash one request from queue\n");
goto req_err;
}
- engine->cur_req_prepared = true;
- }
-
- ret = engine->crypt_one_request(engine, engine->cur_req);
- if (ret) {
- pr_err("failed to crypt one request from queue\n");
- goto req_err;
+ return;
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ breq = ablkcipher_request_cast(engine->cur_req);
+ if (engine->prepare_cipher_request) {
+ ret = engine->prepare_cipher_request(engine, breq);
+ if (ret) {
+ pr_err("failed to prepare request: %d\n", ret);
+ goto req_err;
+ }
+ engine->cur_req_prepared = true;
+ }
+ ret = engine->cipher_one_request(engine, breq);
+ if (ret) {
+ pr_err("failed to cipher one request from queue\n");
+ goto req_err;
+ }
+ return;
+ default:
+ pr_err("failed to prepare request of unknown type\n");
+ return;
}
- return;
req_err:
- crypto_finalize_request(engine, engine->cur_req, ret);
+ switch (rtype) {
+ case CRYPTO_ALG_TYPE_AHASH:
+ hreq = ahash_request_cast(engine->cur_req);
+ crypto_finalize_hash_request(engine, hreq, ret);
+ break;
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ breq = ablkcipher_request_cast(engine->cur_req);
+ crypto_finalize_cipher_request(engine, breq, ret);
+ break;
+ }
return;
out:
@@ -137,12 +167,14 @@ static void crypto_pump_work(struct kthread_work *work)
}
/**
- * crypto_transfer_request - transfer the new request into the engine queue
+ * crypto_transfer_cipher_request - transfer the new request into the
+ * enginequeue
* @engine: the hardware engine
* @req: the request need to be listed into the engine queue
*/
-int crypto_transfer_request(struct crypto_engine *engine,
- struct ablkcipher_request *req, bool need_pump)
+int crypto_transfer_cipher_request(struct crypto_engine *engine,
+ struct ablkcipher_request *req,
+ bool need_pump)
{
unsigned long flags;
int ret;
@@ -162,46 +194,125 @@ int crypto_transfer_request(struct crypto_engine *engine,
spin_unlock_irqrestore(&engine->queue_lock, flags);
return ret;
}
-EXPORT_SYMBOL_GPL(crypto_transfer_request);
+EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request);
+
+/**
+ * crypto_transfer_cipher_request_to_engine - transfer one request to list
+ * into the engine queue
+ * @engine: the hardware engine
+ * @req: the request need to be listed into the engine queue
+ */
+int crypto_transfer_cipher_request_to_engine(struct crypto_engine *engine,
+ struct ablkcipher_request *req)
+{
+ return crypto_transfer_cipher_request(engine, req, true);
+}
+EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request_to_engine);
+
+/**
+ * crypto_transfer_hash_request - transfer the new request into the
+ * enginequeue
+ * @engine: the hardware engine
+ * @req: the request need to be listed into the engine queue
+ */
+int crypto_transfer_hash_request(struct crypto_engine *engine,
+ struct ahash_request *req, bool need_pump)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&engine->queue_lock, flags);
+
+ if (!engine->running) {
+ spin_unlock_irqrestore(&engine->queue_lock, flags);
+ return -ESHUTDOWN;
+ }
+
+ ret = ahash_enqueue_request(&engine->queue, req);
+
+ if (!engine->busy && need_pump)
+ queue_kthread_work(&engine->kworker, &engine->pump_requests);
+
+ spin_unlock_irqrestore(&engine->queue_lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_transfer_hash_request);
/**
- * crypto_transfer_request_to_engine - transfer one request to list into the
- * engine queue
+ * crypto_transfer_hash_request_to_engine - transfer one request to list
+ * into the engine queue
* @engine: the hardware engine
* @req: the request need to be listed into the engine queue
*/
-int crypto_transfer_request_to_engine(struct crypto_engine *engine,
- struct ablkcipher_request *req)
+int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
+ struct ahash_request *req)
{
- return crypto_transfer_request(engine, req, true);
+ return crypto_transfer_hash_request(engine, req, true);
}
-EXPORT_SYMBOL_GPL(crypto_transfer_request_to_engine);
+EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);
/**
- * crypto_finalize_request - finalize one request if the request is done
+ * crypto_finalize_cipher_request - finalize one request if the request is done
* @engine: the hardware engine
* @req: the request need to be finalized
* @err: error number
*/
-void crypto_finalize_request(struct crypto_engine *engine,
- struct ablkcipher_request *req, int err)
+void crypto_finalize_cipher_request(struct crypto_engine *engine,
+ struct ablkcipher_request *req, int err)
{
unsigned long flags;
bool finalize_cur_req = false;
int ret;
spin_lock_irqsave(&engine->queue_lock, flags);
- if (engine->cur_req == req)
+ if (engine->cur_req == &req->base)
finalize_cur_req = true;
spin_unlock_irqrestore(&engine->queue_lock, flags);
if (finalize_cur_req) {
- if (engine->cur_req_prepared && engine->unprepare_request) {
- ret = engine->unprepare_request(engine, req);
+ if (engine->cur_req_prepared &&
+ engine->unprepare_cipher_request) {
+ ret = engine->unprepare_cipher_request(engine, req);
if (ret)
pr_err("failed to unprepare request\n");
}
+ spin_lock_irqsave(&engine->queue_lock, flags);
+ engine->cur_req = NULL;
+ engine->cur_req_prepared = false;
+ spin_unlock_irqrestore(&engine->queue_lock, flags);
+ }
+
+ req->base.complete(&req->base, err);
+ queue_kthread_work(&engine->kworker, &engine->pump_requests);
+}
+EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request);
+
+/**
+ * crypto_finalize_hash_request - finalize one request if the request is done
+ * @engine: the hardware engine
+ * @req: the request need to be finalized
+ * @err: error number
+ */
+void crypto_finalize_hash_request(struct crypto_engine *engine,
+ struct ahash_request *req, int err)
+{
+ unsigned long flags;
+ bool finalize_cur_req = false;
+ int ret;
+
+ spin_lock_irqsave(&engine->queue_lock, flags);
+ if (engine->cur_req == &req->base)
+ finalize_cur_req = true;
+ spin_unlock_irqrestore(&engine->queue_lock, flags);
+
+ if (finalize_cur_req) {
+ if (engine->cur_req_prepared &&
+ engine->unprepare_hash_request) {
+ ret = engine->unprepare_hash_request(engine, req);
+ if (ret)
+ pr_err("failed to unprepare request\n");
+ }
spin_lock_irqsave(&engine->queue_lock, flags);
engine->cur_req = NULL;
engine->cur_req_prepared = false;
@@ -212,7 +323,7 @@ void crypto_finalize_request(struct crypto_engine *engine,
queue_kthread_work(&engine->kworker, &engine->pump_requests);
}
-EXPORT_SYMBOL_GPL(crypto_finalize_request);
+EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);
/**
* crypto_engine_start - start the hardware engine
@@ -249,7 +360,7 @@ EXPORT_SYMBOL_GPL(crypto_engine_start);
int crypto_engine_stop(struct crypto_engine *engine)
{
unsigned long flags;
- unsigned limit = 500;
+ unsigned int limit = 500;
int ret = 0;
spin_lock_irqsave(&engine->queue_lock, flags);
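For illustration, a hedged sketch of how a driver might sit on top of the hash-aware engine API introduced above. Only crypto_transfer_hash_request_to_engine() and crypto_finalize_hash_request() are real exports from this patch; the my_* names are hypothetical placeholders for device-specific code.

	/* Hypothetical driver glue for the new hash engine hooks. */
	static int my_prepare_hash_request(struct crypto_engine *engine,
					   struct ahash_request *req)
	{
		return my_hw_map_buffers(req);	/* e.g. DMA-map the data */
	}

	static int my_hash_one_request(struct crypto_engine *engine,
				       struct ahash_request *req)
	{
		return my_hw_start_hash(req);	/* kick the hardware */
	}

	/* The completion interrupt hands the request back to the engine: */
	static irqreturn_t my_irq(int irq, void *data)
	{
		struct my_dev *dd = data;

		crypto_finalize_hash_request(dd->engine, dd->req, 0);
		return IRQ_HANDLED;
	}

	/* The ahash entry point simply queues onto the engine: */
	static int my_ahash_digest(struct ahash_request *req)
	{
		struct my_dev *dd = my_dev_from_req(req);

		return crypto_transfer_hash_request_to_engine(dd->engine, req);
	}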
diff --git a/crypto/drbg.c b/crypto/drbg.c
index f752da3a7c75..fb33f7d3b052 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -1178,12 +1178,16 @@ static inline int drbg_alloc_state(struct drbg_state *drbg)
goto err;
drbg->Vbuf = kmalloc(drbg_statelen(drbg) + ret, GFP_KERNEL);
- if (!drbg->Vbuf)
+ if (!drbg->Vbuf) {
+ ret = -ENOMEM;
goto fini;
+ }
drbg->V = PTR_ALIGN(drbg->Vbuf, ret + 1);
drbg->Cbuf = kmalloc(drbg_statelen(drbg) + ret, GFP_KERNEL);
- if (!drbg->Cbuf)
+ if (!drbg->Cbuf) {
+ ret = -ENOMEM;
goto fini;
+ }
drbg->C = PTR_ALIGN(drbg->Cbuf, ret + 1);
/* scratchpad is only generated for CTR and Hash */
if (drbg->core->flags & DRBG_HMAC)
@@ -1199,8 +1203,10 @@ static inline int drbg_alloc_state(struct drbg_state *drbg)
if (0 < sb_size) {
drbg->scratchpadbuf = kzalloc(sb_size + ret, GFP_KERNEL);
- if (!drbg->scratchpadbuf)
+ if (!drbg->scratchpadbuf) {
+ ret = -ENOMEM;
goto fini;
+ }
drbg->scratchpad = PTR_ALIGN(drbg->scratchpadbuf, ret + 1);
}
@@ -1917,6 +1923,8 @@ static inline int __init drbg_healthcheck_sanity(void)
return -ENOMEM;
mutex_init(&drbg->drbg_mutex);
+ drbg->core = &drbg_cores[coreref];
+ drbg->reseed_threshold = drbg_max_requests(drbg);
/*
* if the following tests fail, it is likely that there is a buffer
@@ -1926,12 +1934,6 @@ static inline int __init drbg_healthcheck_sanity(void)
* grave bug.
*/
- /* get a valid instance of DRBG for following tests */
- ret = drbg_instantiate(drbg, NULL, coreref, pr);
- if (ret) {
- rc = ret;
- goto outbuf;
- }
max_addtllen = drbg_max_addtl(drbg);
max_request_bytes = drbg_max_request_bytes(drbg);
drbg_string_fill(&addtl, buf, max_addtllen + 1);
@@ -1941,10 +1943,9 @@ static inline int __init drbg_healthcheck_sanity(void)
/* overflow max_bits */
len = drbg_generate(drbg, buf, (max_request_bytes + 1), NULL);
BUG_ON(0 < len);
- drbg_uninstantiate(drbg);
/* overflow max addtllen with personalization string */
- ret = drbg_instantiate(drbg, &addtl, coreref, pr);
+ ret = drbg_seed(drbg, &addtl, false);
BUG_ON(0 == ret);
/* all tests passed */
rc = 0;
@@ -1952,9 +1953,7 @@ static inline int __init drbg_healthcheck_sanity(void)
pr_devel("DRBG: Sanity tests for failure code paths successfully "
"completed\n");
- drbg_uninstantiate(drbg);
-outbuf:
- kzfree(drbg);
+ kfree(drbg);
return rc;
}
@@ -2006,7 +2005,7 @@ static int __init drbg_init(void)
{
unsigned int i = 0; /* pointer to drbg_algs */
unsigned int j = 0; /* pointer to drbg_cores */
- int ret = -EFAULT;
+ int ret;
ret = drbg_healthcheck_sanity();
if (ret)
@@ -2016,7 +2015,7 @@ static int __init drbg_init(void)
pr_info("DRBG: Cannot register all DRBG types"
"(slots needed: %zu, slots available: %zu)\n",
ARRAY_SIZE(drbg_cores) * 2, ARRAY_SIZE(drbg_algs));
- return ret;
+ return -EFAULT;
}
/*
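The drbg error-path fixes above all close the same hole: at each kmalloc() site, ret still holds the non-negative alignment value used for the "+ ret" slack and PTR_ALIGN(..., ret + 1), so a failed allocation must set -ENOMEM explicitly before jumping to the cleanup label or the function reports success. A generic sketch of the corrected idiom, with my_state and 'mask' as illustrative stand-ins:

	static int alloc_aligned_buf(struct my_state *st, size_t len,
				     size_t mask)
	{
		st->buf = kmalloc(len + mask, GFP_KERNEL);
		if (!st->buf)
			return -ENOMEM;	/* do not fall through with ret == 0 */
		st->aligned = PTR_ALIGN(st->buf, mask + 1);
		return 0;
	}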
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 70a892e87ccb..f624ac98c94e 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -117,7 +117,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
struct crypto_skcipher *ctr = ctx->ctr;
struct {
be128 hash;
- u8 iv[8];
+ u8 iv[16];
struct crypto_gcm_setkey_result result;
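The one-line gcm fix deserves a note: crypto_gcm_setkey() hands this on-stack buffer to a CTR skcipher whose IV is a full AES block, 16 bytes, so the old 8-byte iv[] let the transform write past the field and corrupt neighbouring stack memory.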
diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
index bac70995e064..12ad3e3a84e3 100644
--- a/crypto/ghash-generic.c
+++ b/crypto/ghash-generic.c
@@ -14,24 +14,13 @@
#include <crypto/algapi.h>
#include <crypto/gf128mul.h>
+#include <crypto/ghash.h>
#include <crypto/internal/hash.h>
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#define GHASH_BLOCK_SIZE 16
-#define GHASH_DIGEST_SIZE 16
-
-struct ghash_ctx {
- struct gf128mul_4k *gf128;
-};
-
-struct ghash_desc_ctx {
- u8 buffer[GHASH_BLOCK_SIZE];
- u32 bytes;
-};
-
static int ghash_init(struct shash_desc *desc)
{
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
index 86fb59b109a9..94ee44acd465 100644
--- a/crypto/mcryptd.c
+++ b/crypto/mcryptd.c
@@ -612,12 +612,7 @@ EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash);
int ahash_mcryptd_digest(struct ahash_request *desc)
{
- int err;
-
- err = crypto_ahash_init(desc) ?:
- ahash_mcryptd_finup(desc);
-
- return err;
+ return crypto_ahash_init(desc) ?: ahash_mcryptd_finup(desc);
}
int ahash_mcryptd_update(struct ahash_request *desc)
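The condensed return above uses the GNU C binary conditional: "a ?: b" evaluates a once, yielding it if non-zero and b otherwise, a common kernel idiom for chaining calls that return 0 on success. Spelled out, it is equivalent to:

	int err = crypto_ahash_init(desc);

	if (err)
		return err;
	return ahash_mcryptd_finup(desc);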
diff --git a/crypto/rsa_helper.c b/crypto/rsa_helper.c
index 4df6451e7543..0b66dc824606 100644
--- a/crypto/rsa_helper.c
+++ b/crypto/rsa_helper.c
@@ -35,8 +35,8 @@ int rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
n_sz--;
}
- /* In FIPS mode only allow key size 2K & 3K */
- if (n_sz != 256 && n_sz != 384) {
+ /* In FIPS mode only allow key size 2K and higher */
+ if (n_sz < 256) {
pr_err("RSA: key size not allowed in FIPS mode\n");
return -EINVAL;
}
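For the units here: n_sz counts the modulus bytes after leading zeroes are stripped, so a 2048-bit key yields n_sz == 256 and a 3072-bit key 384. The old check accepted exactly those two sizes; the new n_sz < 256 test admits any modulus of 2048 bits or more, matching the relaxed FIPS requirement in the pull summary.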
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 5c9d5a5e7b65..62dffa0028ac 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -209,16 +209,19 @@ static int ahash_partial_update(struct ahash_request **preq,
char *state;
struct ahash_request *req;
int statesize, ret = -EINVAL;
+ const char guard[] = { 0x00, 0xba, 0xad, 0x00 };
req = *preq;
statesize = crypto_ahash_statesize(
crypto_ahash_reqtfm(req));
- state = kmalloc(statesize, GFP_KERNEL);
+ state = kmalloc(statesize + sizeof(guard), GFP_KERNEL);
if (!state) {
pr_err("alt: hash: Failed to alloc state for %s\n", algo);
goto out_nostate;
}
+ memcpy(state + statesize, guard, sizeof(guard));
ret = crypto_ahash_export(req, state);
+ WARN_ON(memcmp(state + statesize, guard, sizeof(guard)));
if (ret) {
pr_err("alt: hash: Failed to export() for %s\n", algo);
goto out;
@@ -665,7 +668,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
memcpy(key, template[i].key, template[i].klen);
ret = crypto_aead_setkey(tfm, key, template[i].klen);
- if (!ret == template[i].fail) {
+ if (template[i].fail == !ret) {
pr_err("alg: aead%s: setkey failed on test %d for %s: flags=%x\n",
d, j, algo, crypto_aead_get_flags(tfm));
goto out;
@@ -770,7 +773,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
memcpy(key, template[i].key, template[i].klen);
ret = crypto_aead_setkey(tfm, key, template[i].klen);
- if (!ret == template[i].fail) {
+ if (template[i].fail == !ret) {
pr_err("alg: aead%s: setkey failed on chunk test %d for %s: flags=%x\n",
d, j, algo, crypto_aead_get_flags(tfm));
goto out;
@@ -1008,6 +1011,9 @@ static int test_cipher(struct crypto_cipher *tfm, int enc,
if (template[i].np)
continue;
+ if (fips_enabled && template[i].fips_skip)
+ continue;
+
j++;
ret = -EINVAL;
@@ -1023,7 +1029,7 @@ static int test_cipher(struct crypto_cipher *tfm, int enc,
ret = crypto_cipher_setkey(tfm, template[i].key,
template[i].klen);
- if (!ret == template[i].fail) {
+ if (template[i].fail == !ret) {
printk(KERN_ERR "alg: cipher: setkey failed "
"on test %d for %s: flags=%x\n", j,
algo, crypto_cipher_get_flags(tfm));
@@ -1112,6 +1118,9 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
if (template[i].np && !template[i].also_non_np)
continue;
+ if (fips_enabled && template[i].fips_skip)
+ continue;
+
if (template[i].iv)
memcpy(iv, template[i].iv, ivsize);
else
@@ -1133,7 +1142,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
ret = crypto_skcipher_setkey(tfm, template[i].key,
template[i].klen);
- if (!ret == template[i].fail) {
+ if (template[i].fail == !ret) {
pr_err("alg: skcipher%s: setkey failed on test %d for %s: flags=%x\n",
d, j, algo, crypto_skcipher_get_flags(tfm));
goto out;
@@ -1198,6 +1207,9 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
if (!template[i].np)
continue;
+ if (fips_enabled && template[i].fips_skip)
+ continue;
+
if (template[i].iv)
memcpy(iv, template[i].iv, ivsize);
else
@@ -1211,7 +1223,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
ret = crypto_skcipher_setkey(tfm, template[i].key,
template[i].klen);
- if (!ret == template[i].fail) {
+ if (template[i].fail == !ret) {
pr_err("alg: skcipher%s: setkey failed on chunk test %d for %s: flags=%x\n",
d, j, algo, crypto_skcipher_get_flags(tfm));
goto out;
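The guard bytes introduced in ahash_partial_update() are a general overrun detector: stash a known pattern just past the bytes the callee may legally write, make the call, then verify the pattern survived. A generic sketch of the technique, independent of testmgr (call_with_guard() is an illustrative name):

	static const u8 guard[4] = { 0x00, 0xba, 0xad, 0x00 };

	static int call_with_guard(int (*fn)(void *buf), size_t size)
	{
		u8 *buf = kmalloc(size + sizeof(guard), GFP_KERNEL);
		int ret;

		if (!buf)
			return -ENOMEM;
		memcpy(buf + size, guard, sizeof(guard));
		ret = fn(buf);
		/* any write past 'size' bytes clobbers the pattern */
		WARN_ON(memcmp(buf + size, guard, sizeof(guard)));
		kfree(buf);
		return ret;
	}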
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index acb6bbff781a..e64a4ef9d8ca 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -59,6 +59,7 @@ struct hash_testvec {
* @tap: How to distribute data in @np SGs
* @also_non_np: if set to 1, the test will be also done without
* splitting data in @np SGs
+ * @fips_skip: Skip the test vector in FIPS mode
*/
struct cipher_testvec {
@@ -75,6 +76,7 @@ struct cipher_testvec {
unsigned char klen;
unsigned short ilen;
unsigned short rlen;
+ bool fips_skip;
};
struct aead_testvec {
@@ -18224,6 +18226,7 @@ static struct cipher_testvec aes_xts_enc_tv_template[] = {
"\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00",
.klen = 32,
+ .fips_skip = 1,
.iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00",
.input = "\x00\x00\x00\x00\x00\x00\x00\x00"
@@ -18566,6 +18569,7 @@ static struct cipher_testvec aes_xts_dec_tv_template[] = {
"\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00",
.klen = 32,
+ .fips_skip = 1,
.iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00",
.input = "\x91\x7c\xf6\x9e\xbd\x68\xb2\xec"
diff --git a/crypto/xor.c b/crypto/xor.c
index 35d6b3adf230..263af9fb45ea 100644
--- a/crypto/xor.c
+++ b/crypto/xor.c
@@ -24,6 +24,10 @@
#include <linux/preempt.h>
#include <asm/xor.h>
+#ifndef XOR_SELECT_TEMPLATE
+#define XOR_SELECT_TEMPLATE(x) (x)
+#endif
+
/* The xor routines to use. */
static struct xor_block_template *active_template;
@@ -109,6 +113,15 @@ calibrate_xor_blocks(void)
void *b1, *b2;
struct xor_block_template *f, *fastest;
+ fastest = XOR_SELECT_TEMPLATE(NULL);
+
+ if (fastest) {
+ printk(KERN_INFO "xor: automatically using best "
+ "checksumming function %-10s\n",
+ fastest->name);
+ goto out;
+ }
+
/*
* Note: Since the memory is not actually used for _anything_ but to
* test the XOR speed, we don't really want kmemcheck to warn about
@@ -126,36 +139,22 @@ calibrate_xor_blocks(void)
* all the possible functions, just test the best one
*/
- fastest = NULL;
-
-#ifdef XOR_SELECT_TEMPLATE
- fastest = XOR_SELECT_TEMPLATE(fastest);
-#endif
-
#define xor_speed(templ) do_xor_speed((templ), b1, b2)
- if (fastest) {
- printk(KERN_INFO "xor: automatically using best "
- "checksumming function:\n");
- xor_speed(fastest);
- goto out;
- } else {
- printk(KERN_INFO "xor: measuring software checksum speed\n");
- XOR_TRY_TEMPLATES;
- fastest = template_list;
- for (f = fastest; f; f = f->next)
- if (f->speed > fastest->speed)
- fastest = f;
- }
+ printk(KERN_INFO "xor: measuring software checksum speed\n");
+ XOR_TRY_TEMPLATES;
+ fastest = template_list;
+ for (f = fastest; f; f = f->next)
+ if (f->speed > fastest->speed)
+ fastest = f;
printk(KERN_INFO "xor: using function: %s (%d.%03d MB/sec)\n",
fastest->name, fastest->speed / 1000, fastest->speed % 1000);
#undef xor_speed
- out:
free_pages((unsigned long)b1, 2);
-
+out:
active_template = fastest;
return 0;
}
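The xor.c refactor replaces an #ifdef at the call site with a default identity definition of XOR_SELECT_TEMPLATE, so architectures that know their best routine can skip the boot-time benchmark entirely. A hedged sketch of how an architecture header might override it; has_vector_unit() and xor_block_vector are illustrative names, not real symbols:

	/* In an arch's <asm/xor.h>: pick a known-best template up front,
	 * falling back to the benchmarked default otherwise. */
	#define XOR_SELECT_TEMPLATE(FASTEST) \
		(has_vector_unit() ? &xor_block_vector : FASTEST)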
diff --git a/crypto/xts.c b/crypto/xts.c
index 26ba5833b994..305343f22a02 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -5,7 +5,7 @@
*
* Copyright (c) 2007 Rik Snel <rsnel@cube.dyndns.org>
*
- * Based om ecb.c
+ * Based on ecb.c
* Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it