author     Linus Torvalds <torvalds@linux-foundation.org>  2016-12-15 00:31:29 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-12-15 00:31:29 +0300
commit     0f1d6dfe03ca4e36132221b918499c6f0b0f048d (patch)
tree       0de8e9330610190a23e173ca7d7f3fb74a517aa2 /drivers/crypto/marvell
parent     d05c5f7ba164aed3db02fb188c26d0dd94f5455b (diff)
parent     04b46fbdea5e31ffd745a34fa61269a69ba9f47a (diff)
download   linux-0f1d6dfe03ca4e36132221b918499c6f0b0f048d.tar.xz
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "Here is the crypto update for 4.10:

  API:
   - add skcipher walk interface
   - add asynchronous compression (acomp) interface
   - fix algif_aead AIO handling of zero buffer

  Algorithms:
   - fix unaligned access in poly1305
   - fix DRBG output to large buffers

  Drivers:
   - add support for iMX6UL to caam
   - fix givenc descriptors (used by IPsec) in caam
   - accelerated SHA256/SHA512 for ARM64 from OpenSSL
   - add SSE CRCT10DIF and CRC32 to ARM/ARM64
   - add AEAD support to Chelsio chcr
   - add Armada 8K support to omap-rng"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (148 commits)
  crypto: testmgr - fix overlap in chunked tests again
  crypto: arm/crc32 - accelerated support based on x86 SSE implementation
  crypto: arm64/crc32 - accelerated support based on x86 SSE implementation
  crypto: arm/crct10dif - port x86 SSE implementation to ARM
  crypto: arm64/crct10dif - port x86 SSE implementation to arm64
  crypto: testmgr - add/enhance test cases for CRC-T10DIF
  crypto: testmgr - avoid overlap in chunked tests
  crypto: chcr - checking for IS_ERR() instead of NULL
  crypto: caam - check caam_emi_slow instead of re-lookup platform
  crypto: algif_aead - fix AIO handling of zero buffer
  crypto: aes-ce - Make aes_simd_algs static
  crypto: algif_skcipher - set error code when kcalloc fails
  crypto: caam - make aamalg_desc a proper module
  crypto: caam - pass key buffers with typesafe pointers
  crypto: arm64/aes-ce-ccm - Fix AEAD decryption length
  MAINTAINERS: add crypto headers to crypto entry
  crypt: doc - remove misleading mention of async API
  crypto: doc - fix header file name
  crypto: api - fix comment typo
  crypto: skcipher - Add separate walker for AEAD decryption
  ...
Diffstat (limited to 'drivers/crypto/marvell')
-rw-r--r--  drivers/crypto/marvell/cesa.c    |  4
-rw-r--r--  drivers/crypto/marvell/cesa.h    |  5
-rw-r--r--  drivers/crypto/marvell/cipher.c  |  8
-rw-r--r--  drivers/crypto/marvell/hash.c    | 65
-rw-r--r--  drivers/crypto/marvell/tdma.c    | 33
5 files changed, 75 insertions(+), 40 deletions(-)
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index 37dadb2a4feb..6e7a5c77a00a 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -375,10 +375,6 @@ static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa)
if (!dma->padding_pool)
return -ENOMEM;
- dma->iv_pool = dmam_pool_create("cesa_iv", dev, 16, 1, 0);
- if (!dma->iv_pool)
- return -ENOMEM;
-
cesa->dma = dma;
return 0;
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h
index e423d33decd4..a768da7138a1 100644
--- a/drivers/crypto/marvell/cesa.h
+++ b/drivers/crypto/marvell/cesa.h
@@ -277,7 +277,7 @@ struct mv_cesa_op_ctx {
#define CESA_TDMA_DUMMY 0
#define CESA_TDMA_DATA 1
#define CESA_TDMA_OP 2
-#define CESA_TDMA_IV 3
+#define CESA_TDMA_RESULT 3
/**
* struct mv_cesa_tdma_desc - TDMA descriptor
@@ -393,7 +393,6 @@ struct mv_cesa_dev_dma {
struct dma_pool *op_pool;
struct dma_pool *cache_pool;
struct dma_pool *padding_pool;
- struct dma_pool *iv_pool;
};
/**
@@ -839,7 +838,7 @@ mv_cesa_tdma_desc_iter_init(struct mv_cesa_tdma_chain *chain)
memset(chain, 0, sizeof(*chain));
}
-int mv_cesa_dma_add_iv_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
+int mv_cesa_dma_add_result_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
u32 size, u32 flags, gfp_t gfp_flags);
struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain,
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
index d19dc9614e6e..098871a22a54 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cipher.c
@@ -212,7 +212,8 @@ mv_cesa_ablkcipher_complete(struct crypto_async_request *req)
struct mv_cesa_req *basereq;
basereq = &creq->base;
- memcpy(ablkreq->info, basereq->chain.last->data, ivsize);
+ memcpy(ablkreq->info, basereq->chain.last->op->ctx.blkcipher.iv,
+ ivsize);
} else {
memcpy_fromio(ablkreq->info,
engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
@@ -373,8 +374,9 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
/* Add output data for IV */
ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
- ret = mv_cesa_dma_add_iv_op(&basereq->chain, CESA_SA_CRYPT_IV_SRAM_OFFSET,
- ivsize, CESA_TDMA_SRC_IN_SRAM, flags);
+ ret = mv_cesa_dma_add_result_op(&basereq->chain, CESA_SA_CFG_SRAM_OFFSET,
+ CESA_SA_DATA_SRAM_OFFSET,
+ CESA_TDMA_SRC_IN_SRAM, flags);
if (ret)
goto err_free_tdma;
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index 77712b375b84..317cf029c0cf 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -311,24 +311,40 @@ static void mv_cesa_ahash_complete(struct crypto_async_request *req)
int i;
digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
- for (i = 0; i < digsize / 4; i++)
- creq->state[i] = readl_relaxed(engine->regs + CESA_IVDIG(i));
- if (creq->last_req) {
+ if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ &&
+ (creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) == CESA_TDMA_RESULT) {
+ __le32 *data = NULL;
+
/*
- * Hardware's MD5 digest is in little endian format, but
- * SHA in big endian format
+ * Result is already in the correct endianess when the SA is
+ * used
*/
- if (creq->algo_le) {
- __le32 *result = (void *)ahashreq->result;
+ data = creq->base.chain.last->op->ctx.hash.hash;
+ for (i = 0; i < digsize / 4; i++)
+ creq->state[i] = cpu_to_le32(data[i]);
- for (i = 0; i < digsize / 4; i++)
- result[i] = cpu_to_le32(creq->state[i]);
- } else {
- __be32 *result = (void *)ahashreq->result;
+ memcpy(ahashreq->result, data, digsize);
+ } else {
+ for (i = 0; i < digsize / 4; i++)
+ creq->state[i] = readl_relaxed(engine->regs +
+ CESA_IVDIG(i));
+ if (creq->last_req) {
+ /*
+ * Hardware's MD5 digest is in little endian format, but
+ * SHA in big endian format
+ */
+ if (creq->algo_le) {
+ __le32 *result = (void *)ahashreq->result;
+
+ for (i = 0; i < digsize / 4; i++)
+ result[i] = cpu_to_le32(creq->state[i]);
+ } else {
+ __be32 *result = (void *)ahashreq->result;
- for (i = 0; i < digsize / 4; i++)
- result[i] = cpu_to_be32(creq->state[i]);
+ for (i = 0; i < digsize / 4; i++)
+ result[i] = cpu_to_be32(creq->state[i]);
+ }
}
}
@@ -503,6 +519,12 @@ mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain,
CESA_SA_DESC_CFG_LAST_FRAG,
CESA_SA_DESC_CFG_FRAG_MSK);
+ ret = mv_cesa_dma_add_result_op(chain,
+ CESA_SA_CFG_SRAM_OFFSET,
+ CESA_SA_DATA_SRAM_OFFSET,
+ CESA_TDMA_SRC_IN_SRAM, flags);
+ if (ret)
+ return ERR_PTR(-ENOMEM);
return op;
}
@@ -563,6 +585,7 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
struct mv_cesa_op_ctx *op = NULL;
unsigned int frag_len;
int ret;
+ u32 type;
basereq->chain.first = NULL;
basereq->chain.last = NULL;
@@ -634,7 +657,15 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
goto err_free_tdma;
}
- if (op) {
+ /*
+ * If results are copied via DMA, this means that this
+ * request can be directly processed by the engine,
+ * without partial updates. So we can chain it at the
+ * DMA level with other requests.
+ */
+ type = basereq->chain.last->flags & CESA_TDMA_TYPE_MSK;
+
+ if (op && type != CESA_TDMA_RESULT) {
/* Add dummy desc to wait for crypto operation end */
ret = mv_cesa_dma_add_dummy_end(&basereq->chain, flags);
if (ret)
@@ -647,8 +678,10 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
else
creq->cache_ptr = 0;
- basereq->chain.last->flags |= (CESA_TDMA_END_OF_REQ |
- CESA_TDMA_BREAK_CHAIN);
+ basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;
+
+ if (type != CESA_TDMA_RESULT)
+ basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN;
return 0;
diff --git a/drivers/crypto/marvell/tdma.c b/drivers/crypto/marvell/tdma.c
index 9fd7a5fbaa1b..4416b88eca70 100644
--- a/drivers/crypto/marvell/tdma.c
+++ b/drivers/crypto/marvell/tdma.c
@@ -69,9 +69,6 @@ void mv_cesa_dma_cleanup(struct mv_cesa_req *dreq)
if (type == CESA_TDMA_OP)
dma_pool_free(cesa_dev->dma->op_pool, tdma->op,
le32_to_cpu(tdma->src));
- else if (type == CESA_TDMA_IV)
- dma_pool_free(cesa_dev->dma->iv_pool, tdma->data,
- le32_to_cpu(tdma->dst));
tdma = tdma->next;
dma_pool_free(cesa_dev->dma->tdma_desc_pool, old_tdma,
@@ -209,29 +206,37 @@ mv_cesa_dma_add_desc(struct mv_cesa_tdma_chain *chain, gfp_t flags)
return new_tdma;
}
-int mv_cesa_dma_add_iv_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
+int mv_cesa_dma_add_result_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
u32 size, u32 flags, gfp_t gfp_flags)
{
-
- struct mv_cesa_tdma_desc *tdma;
- u8 *iv;
- dma_addr_t dma_handle;
+ struct mv_cesa_tdma_desc *tdma, *op_desc;
tdma = mv_cesa_dma_add_desc(chain, gfp_flags);
if (IS_ERR(tdma))
return PTR_ERR(tdma);
- iv = dma_pool_alloc(cesa_dev->dma->iv_pool, gfp_flags, &dma_handle);
- if (!iv)
- return -ENOMEM;
+ /* We re-use an existing op_desc object to retrieve the context
+ * and result instead of allocating a new one.
+ * There is at least one object of this type in a CESA crypto
+ * req, just pick the first one in the chain.
+ */
+ for (op_desc = chain->first; op_desc; op_desc = op_desc->next) {
+ u32 type = op_desc->flags & CESA_TDMA_TYPE_MSK;
+
+ if (type == CESA_TDMA_OP)
+ break;
+ }
+
+ if (!op_desc)
+ return -EIO;
tdma->byte_cnt = cpu_to_le32(size | BIT(31));
tdma->src = src;
- tdma->dst = cpu_to_le32(dma_handle);
- tdma->data = iv;
+ tdma->dst = op_desc->src;
+ tdma->op = op_desc->op;
flags &= (CESA_TDMA_DST_IN_SRAM | CESA_TDMA_SRC_IN_SRAM);
- tdma->flags = flags | CESA_TDMA_IV;
+ tdma->flags = flags | CESA_TDMA_RESULT;
return 0;
}
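
Illustrative note (not part of the commit): the tdma.c hunk above drops the dedicated "cesa_iv" dma_pool and instead walks the descriptor chain that was already built for the request, reusing the operation context of the first CESA_TDMA_OP descriptor as the destination for the copied-back result. Below is a minimal user-space sketch of that walk-and-reuse pattern, under the assumption that a singly linked descriptor chain with typed entries is a fair stand-in; all names here (tdma_desc, chain_add_result_desc, DESC_OP) are hypothetical, not the driver's real types.

/*
 * Sketch only -- plain C, not kernel code. Models the idea of
 * mv_cesa_dma_add_result_op(): reuse an existing OP descriptor's
 * context buffer instead of allocating a private result buffer.
 */
#include <stdio.h>
#include <stddef.h>

enum desc_type { DESC_DUMMY, DESC_DATA, DESC_OP, DESC_RESULT };

struct tdma_desc {
	enum desc_type type;
	void *op_ctx;              /* buffer holding the operation context */
	struct tdma_desc *next;
};

static int chain_add_result_desc(struct tdma_desc *first,
				 struct tdma_desc *result)
{
	struct tdma_desc *op_desc;

	/* Find the first OP descriptor in the chain. */
	for (op_desc = first; op_desc; op_desc = op_desc->next)
		if (op_desc->type == DESC_OP)
			break;

	if (!op_desc)
		return -1;         /* no OP descriptor: nothing to reuse */

	result->type = DESC_RESULT;
	result->op_ctx = op_desc->op_ctx;   /* reuse, do not allocate */
	return 0;
}

int main(void)
{
	char ctx[64];
	struct tdma_desc op = { .type = DESC_OP, .op_ctx = ctx, .next = NULL };
	struct tdma_desc data = { .type = DESC_DATA, .op_ctx = NULL, .next = &op };
	struct tdma_desc result = { 0 };

	if (chain_add_result_desc(&data, &result) == 0)
		printf("result descriptor reuses op context at %p\n", result.op_ctx);
	return 0;
}

This mirrors why the patch can return an error (here -1, -EIO in the driver) when no OP descriptor exists: the result copy has nowhere to land without an operation context to reuse.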