author	Hui Tang <tanghui20@huawei.com>	2021-03-05 09:35:02 +0300
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2022-10-26 14:25:42 +0300
commit	a91af50850270554a4ff032a12eaa4c2fed0dd44 (patch)
tree	f7add2d9131231598c479acf168e8bc0d6ec1d92 /drivers/crypto
parent	8a4ed09ed816a434c942fc7026651a3b99680612 (diff)
crypto: qat - fix use of 'dma_map_single'
[ Upstream commit 7cc05071f930a631040fea16a41f9d78771edc49 ]

DMA_TO_DEVICE synchronisation must be done after the last modification
of the memory region by the software and before it is handed off to
the device.

Signed-off-by: Hui Tang <tanghui20@huawei.com>
Reported-by: kernel test robot <lkp@intel.com>
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Stable-dep-of: cf5bb835b7c8 ("crypto: qat - fix DMA transfer direction")
Signed-off-by: Sasha Levin <sashal@kernel.org>
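The constraint described above is generic DMA-API behaviour rather than anything qat-specific: dma_map_single() may clean/flush the CPU caches at map time, so a CPU store made after a DMA_TO_DEVICE mapping is created can be left in a dirty cache line that the device never observes. A minimal sketch of the required ordering follows; the device, descriptor struct and helper are hypothetical, for illustration only.

#include <linux/dma-mapping.h>
#include <linux/types.h>

struct my_desc {		/* hypothetical descriptor */
	u32 num_bufs;
};

static int my_submit(struct device *dev, struct my_desc *desc, u32 n)
{
	dma_addr_t daddr;

	desc->num_bufs = n;	/* last CPU write to the region ... */

	/*
	 * ... and only now create the DMA_TO_DEVICE mapping, so the
	 * cache maintenance done by dma_map_single() covers every
	 * CPU store made above.
	 */
	daddr = dma_map_single(dev, desc, sizeof(*desc), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, daddr)))
		return -ENOMEM;

	/* ... hand daddr to the hardware and wait for completion ... */

	dma_unmap_single(dev, daddr, sizeof(*desc), DMA_TO_DEVICE);
	return 0;
}

This is exactly the reordering the patch performs below: the dma_map_single() calls for bufl and buflout are moved after the loops that populate those descriptors.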
Diffstat (limited to 'drivers/crypto')
-rw-r--r--	drivers/crypto/qat/qat_common/qat_algs.c	27
1 file changed, 14 insertions(+), 13 deletions(-)
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 06abe1e2074e..8625e299d445 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -669,8 +669,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 	int n = sg_nents(sgl);
 	struct qat_alg_buf_list *bufl;
 	struct qat_alg_buf_list *buflout = NULL;
-	dma_addr_t blp;
-	dma_addr_t bloutp;
+	dma_addr_t blp = DMA_MAPPING_ERROR;
+	dma_addr_t bloutp = DMA_MAPPING_ERROR;
 	struct scatterlist *sg;
 	size_t sz_out, sz = struct_size(bufl, bufers, n + 1);
 
@@ -685,10 +685,6 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 	for_each_sg(sgl, sg, n, i)
 		bufl->bufers[i].addr = DMA_MAPPING_ERROR;
 
-	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
-	if (unlikely(dma_mapping_error(dev, blp)))
-		goto err_in;
-
 	for_each_sg(sgl, sg, n, i) {
 		int y = sg_nctr;
 
@@ -704,6 +700,9 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 		sg_nctr++;
 	}
 	bufl->num_bufs = sg_nctr;
+	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev, blp)))
+		goto err_in;
 	qat_req->buf.bl = bufl;
 	qat_req->buf.blp = blp;
 	qat_req->buf.sz = sz;
@@ -723,9 +722,6 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 		for_each_sg(sglout, sg, n, i)
 			bufers[i].addr = DMA_MAPPING_ERROR;
 
-		bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(dev, bloutp)))
-			goto err_out;
 		for_each_sg(sglout, sg, n, i) {
 			int y = sg_nctr;
 
@@ -742,6 +738,9 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 		}
 		buflout->num_bufs = sg_nctr;
 		buflout->num_mapped_bufs = sg_nctr;
+		bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(dev, bloutp)))
+			goto err_out;
 		qat_req->buf.blout = buflout;
 		qat_req->buf.bloutp = bloutp;
 		qat_req->buf.sz_out = sz_out;
@@ -753,17 +752,21 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 	return 0;
 
 err_out:
+	if (!dma_mapping_error(dev, bloutp))
+		dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
+
 	n = sg_nents(sglout);
 	for (i = 0; i < n; i++)
 		if (!dma_mapping_error(dev, buflout->bufers[i].addr))
 			dma_unmap_single(dev, buflout->bufers[i].addr,
 					 buflout->bufers[i].len,
 					 DMA_BIDIRECTIONAL);
-	if (!dma_mapping_error(dev, bloutp))
-		dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
 	kfree(buflout);
 
 err_in:
+	if (!dma_mapping_error(dev, blp))
+		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
+
 	n = sg_nents(sgl);
 	for (i = 0; i < n; i++)
 		if (!dma_mapping_error(dev, bufl->bufers[i].addr))
@@ -771,8 +774,6 @@ err_in:
 			dma_unmap_single(dev, bufl->bufers[i].addr,
 					 bufl->bufers[i].len,
 					 DMA_BIDIRECTIONAL);
-	if (!dma_mapping_error(dev, blp))
-		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
 	kfree(bufl);
 	dev_err(dev, "Failed to map buf for dma\n");
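The first hunk's pre-initialisation of blp and bloutp to DMA_MAPPING_ERROR is what keeps the shared error labels correct once the descriptor mappings happen late in the function: err_out and err_in can now tell whether each dma_map_single() was ever reached. A condensed sketch of that unwind idiom follows; my_map_pair is a hypothetical helper, not the qat code itself.

#include <linux/dma-mapping.h>

static int my_map_pair(struct device *dev, void *in, void *out, size_t sz,
		       dma_addr_t *din, dma_addr_t *dout)
{
	/*
	 * Pre-initialise both handles so the shared error path can
	 * tell whether each mapping was created and succeeded.
	 */
	dma_addr_t blp = DMA_MAPPING_ERROR;
	dma_addr_t bloutp = DMA_MAPPING_ERROR;

	blp = dma_map_single(dev, in, sz, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, blp)))
		goto err;

	bloutp = dma_map_single(dev, out, sz, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, bloutp)))
		goto err;

	*din = blp;
	*dout = bloutp;
	return 0;

err:
	/* Safe even when the second mapping was never attempted. */
	if (!dma_mapping_error(dev, bloutp))
		dma_unmap_single(dev, bloutp, sz, DMA_TO_DEVICE);
	if (!dma_mapping_error(dev, blp))
		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	return -ENOMEM;
}

Without the pre-initialisation, the error paths would call dma_unmap_single() on an uninitialised handle whenever an earlier step failed before the mapping was made.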