author     Linus Torvalds <torvalds@linux-foundation.org>  2022-05-28 04:06:49 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-05-28 04:06:49 +0300
commit     d075c0c1be279c5f4c6688ac0442fff6494e56bc (patch)
tree       c3e3ab6b35139229ad0a5096ccea0c00eb97998b /drivers/crypto/ccree
parent     bf272460d744112bacd4c4d562592decbf0edf64 (diff)
parent     e4e62bbc6aba49a5edb3156ec65f6698ff37d228 (diff)
download   linux-d075c0c1be279c5f4c6688ac0442fff6494e56bc.tar.xz
Merge tag 'v5.19-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:

 "API:
   - Test in-place en/decryption with two sglists in testmgr
   - Fix process vs softirq race in cryptd

  Algorithms:
   - Add arm64 acceleration for sm4
   - Add s390 acceleration for chacha20

  Drivers:
   - Add polarfire soc hwrng support in mpsf
   - Add support for TI SoC AM62x in sa2ul
   - Add support for ATSHA204 cryptochip in atmel-sha204a
   - Add support for PRNG in caam
   - Restore support for storage encryption in qat
   - Restore support for storage encryption in hisilicon/sec"

* tag 'v5.19-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (116 commits)
  hwrng: omap3-rom - fix using wrong clk_disable() in omap_rom_rng_runtime_resume()
  crypto: hisilicon/sec - delete the flag CRYPTO_ALG_ALLOCATES_MEMORY
  crypto: qat - add support for 401xx devices
  crypto: qat - re-enable registration of algorithms
  crypto: qat - honor CRYPTO_TFM_REQ_MAY_SLEEP flag
  crypto: qat - add param check for DH
  crypto: qat - add param check for RSA
  crypto: qat - remove dma_free_coherent() for DH
  crypto: qat - remove dma_free_coherent() for RSA
  crypto: qat - fix memory leak in RSA
  crypto: qat - add backlog mechanism
  crypto: qat - refactor submission logic
  crypto: qat - use pre-allocated buffers in datapath
  crypto: qat - set to zero DH parameters before free
  crypto: s390 - add crypto library interface for ChaCha20
  crypto: talitos - Uniform coding style with defined variable
  crypto: octeontx2 - simplify the return expression of otx2_cpt_aead_cbc_aes_sha_setkey()
  crypto: cryptd - Protect per-CPU resource by disabling BH.
  crypto: sun8i-ce - do not fallback if cryptlen is less than sg length
  crypto: sun8i-ce - rework debugging
  ...
Diffstat (limited to 'drivers/crypto/ccree')
-rw-r--r--  drivers/crypto/ccree/cc_buffer_mgr.c  27
-rw-r--r--  drivers/crypto/ccree/cc_driver.c      24
2 files changed, 28 insertions(+), 23 deletions(-)
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index 11e0278c8631..6140e4927322 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -356,12 +356,14 @@ void cc_unmap_cipher_request(struct device *dev, void *ctx,
                               req_ctx->mlli_params.mlli_dma_addr);
         }
 
-        dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
-        dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
-
         if (src != dst) {
-                dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
+                dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_TO_DEVICE);
+                dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_FROM_DEVICE);
                 dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
+                dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
+        } else {
+                dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
+                dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
         }
 }
 
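The hunk above narrows the DMA directions: for an out-of-place request the device only reads src and only writes dst, so DMA_TO_DEVICE and DMA_FROM_DEVICE replace the blanket DMA_BIDIRECTIONAL, while an in-place request (src == dst) genuinely needs both directions. A minimal standalone sketch of the pattern (example_unmap_request() is a hypothetical helper, not driver code):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static void example_unmap_request(struct device *dev,
                                  struct scatterlist *src, int src_nents,
                                  struct scatterlist *dst, int dst_nents)
{
        if (src != dst) {
                /* Out-of-place: src was only read, dst only written. */
                dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
                dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
        } else {
                /* In-place: the single buffer is both read and written. */
                dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
        }
}

The direction passed to dma_unmap_sg() must match the one used at dma_map_sg() time, which is why the map-side hunks below change in lockstep.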
@@ -377,6 +379,7 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
         u32 dummy = 0;
         int rc = 0;
         u32 mapped_nents = 0;
+        int src_direction = (src != dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
 
         req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
         mlli_params->curr_pool = NULL;
@@ -399,7 +402,7 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
         }
 
         /* Map the src SGL */
-        rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
+        rc = cc_map_sg(dev, src, nbytes, src_direction, &req_ctx->in_nents,
                        LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
         if (rc)
                 goto cipher_exit;
@@ -416,7 +419,7 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
                 }
         } else {
                 /* Map the dst sg */
-                rc = cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
+                rc = cc_map_sg(dev, dst, nbytes, DMA_FROM_DEVICE,
                                &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
                                &dummy, &mapped_nents);
                 if (rc)
@@ -456,6 +459,7 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
         unsigned int hw_iv_size = areq_ctx->hw_iv_size;
         struct cc_drvdata *drvdata = dev_get_drvdata(dev);
+        int src_direction = (req->src != req->dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
 
         if (areq_ctx->mac_buf_dma_addr) {
                 dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
@@ -514,13 +518,11 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
                 sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
                 areq_ctx->assoclen, req->cryptlen);
 
-        dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents,
-                     DMA_BIDIRECTIONAL);
+        dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents, src_direction);
         if (req->src != req->dst) {
                 dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
                         sg_virt(req->dst));
-                dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents,
-                             DMA_BIDIRECTIONAL);
+                dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents, DMA_FROM_DEVICE);
         }
         if (drvdata->coherent &&
             areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
@@ -843,7 +845,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
         else
                 size_for_map -= authsize;
 
-        rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
+        rc = cc_map_sg(dev, req->dst, size_for_map, DMA_FROM_DEVICE,
                        &areq_ctx->dst.mapped_nents,
                        LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
                        &dst_mapped_nents);
@@ -1056,7 +1058,8 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
                 size_to_map += authsize;
         }
 
-        rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
+        rc = cc_map_sg(dev, req->src, size_to_map,
+                       (req->src != req->dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL),
                        &areq_ctx->src.mapped_nents,
                        (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
                         LLI_MAX_NUM_OF_DATA_ENTRIES),
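On the map side, the same src != dst test selects the direction. A sketch of the invariant, using an illustrative example_ctx struct rather than the driver's real request context:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

struct example_ctx {
        enum dma_data_direction src_dir;        /* remembered for unmap */
        int src_nents;
};

static int example_map_src(struct device *dev, struct example_ctx *ctx,
                           struct scatterlist *src, struct scatterlist *dst,
                           int nents)
{
        /* Device only reads src when writing elsewhere; else it reads and writes. */
        ctx->src_dir = (src != dst) ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;

        ctx->src_nents = dma_map_sg(dev, src, nents, ctx->src_dir);
        return ctx->src_nents ? 0 : -ENOMEM;
}

The patch itself recomputes src_direction from req->src != req->dst in the unmap paths instead of storing it; either way, map and unmap see the same direction, which is the DMA-API requirement the change preserves.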
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index 790fa9058a36..7d1bee86d581 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -529,24 +529,26 @@ static int init_cc_resources(struct platform_device *plat_dev)
                 goto post_req_mgr_err;
         }
 
-        /* Allocate crypto algs */
-        rc = cc_cipher_alloc(new_drvdata);
+        /* hash must be allocated first due to use of send_request_init()
+         * and dependency of AEAD on it
+         */
+        rc = cc_hash_alloc(new_drvdata);
         if (rc) {
-                dev_err(dev, "cc_cipher_alloc failed\n");
+                dev_err(dev, "cc_hash_alloc failed\n");
                 goto post_buf_mgr_err;
         }
 
-        /* hash must be allocated before aead since hash exports APIs */
-        rc = cc_hash_alloc(new_drvdata);
+        /* Allocate crypto algs */
+        rc = cc_cipher_alloc(new_drvdata);
         if (rc) {
-                dev_err(dev, "cc_hash_alloc failed\n");
-                goto post_cipher_err;
+                dev_err(dev, "cc_cipher_alloc failed\n");
+                goto post_hash_err;
         }
 
         rc = cc_aead_alloc(new_drvdata);
         if (rc) {
                 dev_err(dev, "cc_aead_alloc failed\n");
-                goto post_hash_err;
+                goto post_cipher_err;
         }
 
         /* If we got here and FIPS mode is enabled
@@ -558,10 +560,10 @@ static int init_cc_resources(struct platform_device *plat_dev)
         pm_runtime_put(dev);
         return 0;
 
-post_hash_err:
-        cc_hash_free(new_drvdata);
 post_cipher_err:
         cc_cipher_free(new_drvdata);
+post_hash_err:
+        cc_hash_free(new_drvdata);
 post_buf_mgr_err:
         cc_buffer_mgr_fini(new_drvdata);
 post_req_mgr_err:
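The reordering follows the standard kernel unwind discipline: allocate in dependency order (hash first, since send_request_init() and the AEAD code depend on it) and free in exact reverse order through a goto ladder whose labels name the step to undo first. A condensed sketch of the shape (example_init() is illustrative; the cc_*_alloc/cc_*_free names are the driver's own):

static int example_init(struct cc_drvdata *drvdata)
{
        int rc;

        rc = cc_hash_alloc(drvdata);    /* first: cipher/aead depend on it */
        if (rc)
                return rc;

        rc = cc_cipher_alloc(drvdata);
        if (rc)
                goto free_hash;

        rc = cc_aead_alloc(drvdata);
        if (rc)
                goto free_cipher;

        return 0;

free_cipher:
        cc_cipher_free(drvdata);
free_hash:
        cc_hash_free(drvdata);
        return rc;
}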
@@ -593,8 +595,8 @@ static void cleanup_cc_resources(struct platform_device *plat_dev)
                 (struct cc_drvdata *)platform_get_drvdata(plat_dev);
 
         cc_aead_free(drvdata);
-        cc_hash_free(drvdata);
         cc_cipher_free(drvdata);
+        cc_hash_free(drvdata);
         cc_buffer_mgr_fini(drvdata);
         cc_req_mgr_fini(drvdata);
         cc_fips_fini(drvdata);
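The cleanup path gets the mirror-image fix: cc_aead_free(), cc_cipher_free() and cc_hash_free() now run in the exact reverse of the new allocation order, so hash, which the other two depend on, is torn down last.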