Diffstat (limited to 'drivers/crypto/starfive')
-rw-r--r--  drivers/crypto/starfive/Kconfig       |   4
-rw-r--r--  drivers/crypto/starfive/jh7110-aes.c  | 597
-rw-r--r--  drivers/crypto/starfive/jh7110-cryp.c |  43
-rw-r--r--  drivers/crypto/starfive/jh7110-cryp.h |  10
-rw-r--r--  drivers/crypto/starfive/jh7110-hash.c | 275
-rw-r--r--  drivers/crypto/starfive/jh7110-rsa.c  |  14
6 files changed, 521 insertions(+), 422 deletions(-)
diff --git a/drivers/crypto/starfive/Kconfig b/drivers/crypto/starfive/Kconfig
index cb59357b58b2..0fe389e9f932 100644
--- a/drivers/crypto/starfive/Kconfig
+++ b/drivers/crypto/starfive/Kconfig
@@ -14,6 +14,10 @@ config CRYPTO_DEV_JH7110
select CRYPTO_RSA
select CRYPTO_AES
select CRYPTO_CCM
+ select CRYPTO_GCM
+ select CRYPTO_ECB
+ select CRYPTO_CBC
+ select CRYPTO_CTR
help
Support for StarFive JH7110 crypto hardware acceleration engine.
This module provides acceleration for public key algo,
diff --git a/drivers/crypto/starfive/jh7110-aes.c b/drivers/crypto/starfive/jh7110-aes.c
index 1ac15cc4ef3c..86a1a1fa9f8f 100644
--- a/drivers/crypto/starfive/jh7110-aes.c
+++ b/drivers/crypto/starfive/jh7110-aes.c
@@ -78,7 +78,7 @@ static inline int is_gcm(struct starfive_cryp_dev *cryp)
return (cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_GCM;
}
-static inline int is_encrypt(struct starfive_cryp_dev *cryp)
+static inline bool is_encrypt(struct starfive_cryp_dev *cryp)
{
return cryp->flags & FLG_ENCRYPT;
}
@@ -103,16 +103,6 @@ static void starfive_aes_aead_hw_start(struct starfive_cryp_ctx *ctx, u32 hw_mod
}
}
-static inline void starfive_aes_set_ivlen(struct starfive_cryp_ctx *ctx)
-{
- struct starfive_cryp_dev *cryp = ctx->cryp;
-
- if (is_gcm(cryp))
- writel(GCM_AES_IV_SIZE, cryp->base + STARFIVE_AES_IVLEN);
- else
- writel(AES_BLOCK_SIZE, cryp->base + STARFIVE_AES_IVLEN);
-}
-
static inline void starfive_aes_set_alen(struct starfive_cryp_ctx *ctx)
{
struct starfive_cryp_dev *cryp = ctx->cryp;
@@ -261,7 +251,6 @@ static int starfive_aes_hw_init(struct starfive_cryp_ctx *ctx)
rctx->csr.aes.mode = hw_mode;
rctx->csr.aes.cmode = !is_encrypt(cryp);
- rctx->csr.aes.ie = 1;
rctx->csr.aes.stmode = STARFIVE_AES_MODE_XFB_1;
if (cryp->side_chan) {
@@ -279,7 +268,7 @@ static int starfive_aes_hw_init(struct starfive_cryp_ctx *ctx)
case STARFIVE_AES_MODE_GCM:
starfive_aes_set_alen(ctx);
starfive_aes_set_mlen(ctx);
- starfive_aes_set_ivlen(ctx);
+ writel(GCM_AES_IV_SIZE, cryp->base + STARFIVE_AES_IVLEN);
starfive_aes_aead_hw_start(ctx, hw_mode);
starfive_aes_write_iv(ctx, (void *)cryp->req.areq->iv);
break;
@@ -300,52 +289,49 @@ static int starfive_aes_hw_init(struct starfive_cryp_ctx *ctx)
return cryp->err;
}
-static int starfive_aes_read_authtag(struct starfive_cryp_dev *cryp)
+static int starfive_aes_read_authtag(struct starfive_cryp_ctx *ctx)
{
- int i, start_addr;
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ struct starfive_cryp_request_ctx *rctx = ctx->rctx;
+ int i;
if (starfive_aes_wait_busy(cryp))
return dev_err_probe(cryp->dev, -ETIMEDOUT,
"Timeout waiting for tag generation.");
- start_addr = STARFIVE_AES_NONCE0;
-
- if (is_gcm(cryp))
- for (i = 0; i < AES_BLOCK_32; i++, start_addr += 4)
- cryp->tag_out[i] = readl(cryp->base + start_addr);
- else
+ if ((cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_GCM) {
+ cryp->tag_out[0] = readl(cryp->base + STARFIVE_AES_NONCE0);
+ cryp->tag_out[1] = readl(cryp->base + STARFIVE_AES_NONCE1);
+ cryp->tag_out[2] = readl(cryp->base + STARFIVE_AES_NONCE2);
+ cryp->tag_out[3] = readl(cryp->base + STARFIVE_AES_NONCE3);
+ } else {
for (i = 0; i < AES_BLOCK_32; i++)
cryp->tag_out[i] = readl(cryp->base + STARFIVE_AES_AESDIO0R);
+ }
if (is_encrypt(cryp)) {
- scatterwalk_copychunks(cryp->tag_out, &cryp->out_walk, cryp->authsize, 1);
+ scatterwalk_map_and_copy(cryp->tag_out, rctx->out_sg,
+ cryp->total_in, cryp->authsize, 1);
} else {
- scatterwalk_copychunks(cryp->tag_in, &cryp->in_walk, cryp->authsize, 0);
-
if (crypto_memneq(cryp->tag_in, cryp->tag_out, cryp->authsize))
- return dev_err_probe(cryp->dev, -EBADMSG, "Failed tag verification\n");
+ return -EBADMSG;
}
return 0;
}
-static void starfive_aes_finish_req(struct starfive_cryp_dev *cryp)
+static void starfive_aes_finish_req(struct starfive_cryp_ctx *ctx)
{
- union starfive_aes_csr csr;
+ struct starfive_cryp_dev *cryp = ctx->cryp;
int err = cryp->err;
if (!err && cryp->authsize)
- err = starfive_aes_read_authtag(cryp);
+ err = starfive_aes_read_authtag(ctx);
if (!err && ((cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_CBC ||
(cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_CTR))
starfive_aes_get_iv(cryp, (void *)cryp->req.sreq->iv);
- /* reset irq flags*/
- csr.v = 0;
- csr.aesrst = 1;
- writel(csr.v, cryp->base + STARFIVE_AES_CSR);
-
if (cryp->authsize)
crypto_finalize_aead_request(cryp->engine, cryp->req.areq, err);
else
@@ -353,39 +339,6 @@ static void starfive_aes_finish_req(struct starfive_cryp_dev *cryp)
err);
}
-void starfive_aes_done_task(unsigned long param)
-{
- struct starfive_cryp_dev *cryp = (struct starfive_cryp_dev *)param;
- u32 block[AES_BLOCK_32];
- u32 stat;
- int i;
-
- for (i = 0; i < AES_BLOCK_32; i++)
- block[i] = readl(cryp->base + STARFIVE_AES_AESDIO0R);
-
- scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, AES_BLOCK_SIZE,
- cryp->total_out), 1);
-
- cryp->total_out -= min_t(size_t, AES_BLOCK_SIZE, cryp->total_out);
-
- if (!cryp->total_out) {
- starfive_aes_finish_req(cryp);
- return;
- }
-
- memset(block, 0, AES_BLOCK_SIZE);
- scatterwalk_copychunks(block, &cryp->in_walk, min_t(size_t, AES_BLOCK_SIZE,
- cryp->total_in), 0);
- cryp->total_in -= min_t(size_t, AES_BLOCK_SIZE, cryp->total_in);
-
- for (i = 0; i < AES_BLOCK_32; i++)
- writel(block[i], cryp->base + STARFIVE_AES_AESDIO0R);
-
- stat = readl(cryp->base + STARFIVE_IE_MASK_OFFSET);
- stat &= ~STARFIVE_IE_MASK_AES_DONE;
- writel(stat, cryp->base + STARFIVE_IE_MASK_OFFSET);
-}
-
static int starfive_aes_gcm_write_adata(struct starfive_cryp_ctx *ctx)
{
struct starfive_cryp_dev *cryp = ctx->cryp;
@@ -451,60 +404,165 @@ static int starfive_aes_ccm_write_adata(struct starfive_cryp_ctx *ctx)
return 0;
}
-static int starfive_aes_prepare_req(struct skcipher_request *req,
- struct aead_request *areq)
+static void starfive_aes_dma_done(void *param)
{
- struct starfive_cryp_ctx *ctx;
- struct starfive_cryp_request_ctx *rctx;
- struct starfive_cryp_dev *cryp;
+ struct starfive_cryp_dev *cryp = param;
- if (!req && !areq)
- return -EINVAL;
+ complete(&cryp->dma_done);
+}
- ctx = req ? crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)) :
- crypto_aead_ctx(crypto_aead_reqtfm(areq));
+static void starfive_aes_dma_init(struct starfive_cryp_dev *cryp)
+{
+ cryp->cfg_in.direction = DMA_MEM_TO_DEV;
+ cryp->cfg_in.src_addr_width = DMA_SLAVE_BUSWIDTH_16_BYTES;
+ cryp->cfg_in.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cryp->cfg_in.src_maxburst = cryp->dma_maxburst;
+ cryp->cfg_in.dst_maxburst = cryp->dma_maxburst;
+ cryp->cfg_in.dst_addr = cryp->phys_base + STARFIVE_ALG_FIFO_OFFSET;
- cryp = ctx->cryp;
- rctx = req ? skcipher_request_ctx(req) : aead_request_ctx(areq);
+ dmaengine_slave_config(cryp->tx, &cryp->cfg_in);
- if (req) {
- cryp->req.sreq = req;
- cryp->total_in = req->cryptlen;
- cryp->total_out = req->cryptlen;
- cryp->assoclen = 0;
- cryp->authsize = 0;
- } else {
- cryp->req.areq = areq;
- cryp->assoclen = areq->assoclen;
- cryp->authsize = crypto_aead_authsize(crypto_aead_reqtfm(areq));
- if (is_encrypt(cryp)) {
- cryp->total_in = areq->cryptlen;
- cryp->total_out = areq->cryptlen;
- } else {
- cryp->total_in = areq->cryptlen - cryp->authsize;
- cryp->total_out = cryp->total_in;
- }
- }
+ cryp->cfg_out.direction = DMA_DEV_TO_MEM;
+ cryp->cfg_out.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cryp->cfg_out.dst_addr_width = DMA_SLAVE_BUSWIDTH_16_BYTES;
+ cryp->cfg_out.src_maxburst = 4;
+ cryp->cfg_out.dst_maxburst = 4;
+ cryp->cfg_out.src_addr = cryp->phys_base + STARFIVE_ALG_FIFO_OFFSET;
- rctx->in_sg = req ? req->src : areq->src;
- scatterwalk_start(&cryp->in_walk, rctx->in_sg);
+ dmaengine_slave_config(cryp->rx, &cryp->cfg_out);
- rctx->out_sg = req ? req->dst : areq->dst;
- scatterwalk_start(&cryp->out_walk, rctx->out_sg);
+ init_completion(&cryp->dma_done);
+}
- if (cryp->assoclen) {
- rctx->adata = kzalloc(cryp->assoclen + AES_BLOCK_SIZE, GFP_KERNEL);
- if (!rctx->adata)
- return dev_err_probe(cryp->dev, -ENOMEM,
- "Failed to alloc memory for adata");
+static int starfive_aes_dma_xfer(struct starfive_cryp_dev *cryp,
+ struct scatterlist *src,
+ struct scatterlist *dst,
+ int len)
+{
+ struct dma_async_tx_descriptor *in_desc, *out_desc;
+ union starfive_alg_cr alg_cr;
+ int ret = 0, in_save, out_save;
+
+ alg_cr.v = 0;
+ alg_cr.start = 1;
+ alg_cr.aes_dma_en = 1;
+ writel(alg_cr.v, cryp->base + STARFIVE_ALG_CR_OFFSET);
+
+ in_save = sg_dma_len(src);
+ out_save = sg_dma_len(dst);
- scatterwalk_copychunks(rctx->adata, &cryp->in_walk, cryp->assoclen, 0);
- scatterwalk_copychunks(NULL, &cryp->out_walk, cryp->assoclen, 2);
+ writel(ALIGN(len, AES_BLOCK_SIZE), cryp->base + STARFIVE_DMA_IN_LEN_OFFSET);
+ writel(ALIGN(len, AES_BLOCK_SIZE), cryp->base + STARFIVE_DMA_OUT_LEN_OFFSET);
+
+ sg_dma_len(src) = ALIGN(len, AES_BLOCK_SIZE);
+ sg_dma_len(dst) = ALIGN(len, AES_BLOCK_SIZE);
+
+ out_desc = dmaengine_prep_slave_sg(cryp->rx, dst, 1, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!out_desc) {
+ ret = -EINVAL;
+ goto dma_err;
}
- ctx->rctx = rctx;
+ out_desc->callback = starfive_aes_dma_done;
+ out_desc->callback_param = cryp;
+
+ reinit_completion(&cryp->dma_done);
+ dmaengine_submit(out_desc);
+ dma_async_issue_pending(cryp->rx);
+
+ in_desc = dmaengine_prep_slave_sg(cryp->tx, src, 1, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!in_desc) {
+ ret = -EINVAL;
+ goto dma_err;
+ }
+
+ dmaengine_submit(in_desc);
+ dma_async_issue_pending(cryp->tx);
+
+ if (!wait_for_completion_timeout(&cryp->dma_done,
+ msecs_to_jiffies(1000)))
+ ret = -ETIMEDOUT;
+
+dma_err:
+ sg_dma_len(src) = in_save;
+ sg_dma_len(dst) = out_save;
+
+ alg_cr.v = 0;
+ alg_cr.clear = 1;
+ writel(alg_cr.v, cryp->base + STARFIVE_ALG_CR_OFFSET);
+
+ return ret;
+}
+
+static int starfive_aes_map_sg(struct starfive_cryp_dev *cryp,
+ struct scatterlist *src,
+ struct scatterlist *dst)
+{
+ struct scatterlist *stsg, *dtsg;
+ struct scatterlist _src[2], _dst[2];
+ unsigned int remain = cryp->total_in;
+ unsigned int len, src_nents, dst_nents;
+ int ret;
+
+ if (src == dst) {
+ for (stsg = src, dtsg = dst; remain > 0;
+ stsg = sg_next(stsg), dtsg = sg_next(dtsg)) {
+ src_nents = dma_map_sg(cryp->dev, stsg, 1, DMA_BIDIRECTIONAL);
+ if (src_nents == 0)
+ return dev_err_probe(cryp->dev, -ENOMEM,
+ "dma_map_sg error\n");
+
+ dst_nents = src_nents;
+ len = min(sg_dma_len(stsg), remain);
+
+ ret = starfive_aes_dma_xfer(cryp, stsg, dtsg, len);
+ dma_unmap_sg(cryp->dev, stsg, 1, DMA_BIDIRECTIONAL);
+ if (ret)
+ return ret;
+
+ remain -= len;
+ }
+ } else {
+ for (stsg = src, dtsg = dst;;) {
+ src_nents = dma_map_sg(cryp->dev, stsg, 1, DMA_TO_DEVICE);
+ if (src_nents == 0)
+ return dev_err_probe(cryp->dev, -ENOMEM,
+ "dma_map_sg src error\n");
+
+ dst_nents = dma_map_sg(cryp->dev, dtsg, 1, DMA_FROM_DEVICE);
+ if (dst_nents == 0)
+ return dev_err_probe(cryp->dev, -ENOMEM,
+ "dma_map_sg dst error\n");
+
+ len = min(sg_dma_len(stsg), sg_dma_len(dtsg));
+ len = min(len, remain);
+
+ ret = starfive_aes_dma_xfer(cryp, stsg, dtsg, len);
+ dma_unmap_sg(cryp->dev, stsg, 1, DMA_TO_DEVICE);
+ dma_unmap_sg(cryp->dev, dtsg, 1, DMA_FROM_DEVICE);
+ if (ret)
+ return ret;
+
+ remain -= len;
+ if (remain == 0)
+ break;
+
+ if (sg_dma_len(stsg) - len) {
+ stsg = scatterwalk_ffwd(_src, stsg, len);
+ dtsg = sg_next(dtsg);
+ } else if (sg_dma_len(dtsg) - len) {
+ dtsg = scatterwalk_ffwd(_dst, dtsg, len);
+ stsg = sg_next(stsg);
+ } else {
+ stsg = sg_next(stsg);
+ dtsg = sg_next(dtsg);
+ }
+ }
+ }
- return starfive_aes_hw_init(ctx);
+ return 0;
}
static int starfive_aes_do_one_req(struct crypto_engine *engine, void *areq)
@@ -513,35 +571,42 @@ static int starfive_aes_do_one_req(struct crypto_engine *engine, void *areq)
container_of(areq, struct skcipher_request, base);
struct starfive_cryp_ctx *ctx =
crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+ struct starfive_cryp_request_ctx *rctx = skcipher_request_ctx(req);
struct starfive_cryp_dev *cryp = ctx->cryp;
- u32 block[AES_BLOCK_32];
- u32 stat;
- int err;
- int i;
+ int ret;
- err = starfive_aes_prepare_req(req, NULL);
- if (err)
- return err;
+ cryp->req.sreq = req;
+ cryp->total_in = req->cryptlen;
+ cryp->total_out = req->cryptlen;
+ cryp->assoclen = 0;
+ cryp->authsize = 0;
- /*
- * Write first plain/ciphertext block to start the module
- * then let irq tasklet handle the rest of the data blocks.
- */
- scatterwalk_copychunks(block, &cryp->in_walk, min_t(size_t, AES_BLOCK_SIZE,
- cryp->total_in), 0);
- cryp->total_in -= min_t(size_t, AES_BLOCK_SIZE, cryp->total_in);
+ rctx->in_sg = req->src;
+ rctx->out_sg = req->dst;
+
+ ctx->rctx = rctx;
+
+ ret = starfive_aes_hw_init(ctx);
+ if (ret)
+ return ret;
- for (i = 0; i < AES_BLOCK_32; i++)
- writel(block[i], cryp->base + STARFIVE_AES_AESDIO0R);
+ if (!cryp->total_in)
+ goto finish_req;
- stat = readl(cryp->base + STARFIVE_IE_MASK_OFFSET);
- stat &= ~STARFIVE_IE_MASK_AES_DONE;
- writel(stat, cryp->base + STARFIVE_IE_MASK_OFFSET);
+ starfive_aes_dma_init(cryp);
+
+ ret = starfive_aes_map_sg(cryp, rctx->in_sg, rctx->out_sg);
+ if (ret)
+ return ret;
+
+finish_req:
+ starfive_aes_finish_req(ctx);
return 0;
}
-static int starfive_aes_init_tfm(struct crypto_skcipher *tfm)
+static int starfive_aes_init_tfm(struct crypto_skcipher *tfm,
+ const char *alg_name)
{
struct starfive_cryp_ctx *ctx = crypto_skcipher_ctx(tfm);
@@ -549,12 +614,26 @@ static int starfive_aes_init_tfm(struct crypto_skcipher *tfm)
if (!ctx->cryp)
return -ENODEV;
+ ctx->skcipher_fbk = crypto_alloc_skcipher(alg_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->skcipher_fbk))
+ return dev_err_probe(ctx->cryp->dev, PTR_ERR(ctx->skcipher_fbk),
+ "%s() failed to allocate fallback for %s\n",
+ __func__, alg_name);
+
crypto_skcipher_set_reqsize(tfm, sizeof(struct starfive_cryp_request_ctx) +
- sizeof(struct skcipher_request));
+ crypto_skcipher_reqsize(ctx->skcipher_fbk));
return 0;
}
+static void starfive_aes_exit_tfm(struct crypto_skcipher *tfm)
+{
+ struct starfive_cryp_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ crypto_free_skcipher(ctx->skcipher_fbk);
+}
+
static int starfive_aes_aead_do_one_req(struct crypto_engine *engine, void *areq)
{
struct aead_request *req =
@@ -562,79 +641,99 @@ static int starfive_aes_aead_do_one_req(struct crypto_engine *engine, void *areq
struct starfive_cryp_ctx *ctx =
crypto_aead_ctx(crypto_aead_reqtfm(req));
struct starfive_cryp_dev *cryp = ctx->cryp;
- struct starfive_cryp_request_ctx *rctx;
- u32 block[AES_BLOCK_32];
- u32 stat;
- int err;
- int i;
+ struct starfive_cryp_request_ctx *rctx = aead_request_ctx(req);
+ struct scatterlist _src[2], _dst[2];
+ int ret;
+
+ cryp->req.areq = req;
+ cryp->assoclen = req->assoclen;
+ cryp->authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
+
+ rctx->in_sg = scatterwalk_ffwd(_src, req->src, cryp->assoclen);
+ if (req->src == req->dst)
+ rctx->out_sg = rctx->in_sg;
+ else
+ rctx->out_sg = scatterwalk_ffwd(_dst, req->dst, cryp->assoclen);
+
+ if (is_encrypt(cryp)) {
+ cryp->total_in = req->cryptlen;
+ cryp->total_out = req->cryptlen;
+ } else {
+ cryp->total_in = req->cryptlen - cryp->authsize;
+ cryp->total_out = cryp->total_in;
+ scatterwalk_map_and_copy(cryp->tag_in, req->src,
+ cryp->total_in + cryp->assoclen,
+ cryp->authsize, 0);
+ }
- err = starfive_aes_prepare_req(NULL, req);
- if (err)
- return err;
+ if (cryp->assoclen) {
+ rctx->adata = kzalloc(cryp->assoclen + AES_BLOCK_SIZE, GFP_KERNEL);
+ if (!rctx->adata)
+ return dev_err_probe(cryp->dev, -ENOMEM,
+ "Failed to alloc memory for adata");
+
+ if (sg_copy_to_buffer(req->src, sg_nents_for_len(req->src, cryp->assoclen),
+ rctx->adata, cryp->assoclen) != cryp->assoclen)
+ return -EINVAL;
+ }
+
+ if (cryp->total_in)
+ sg_zero_buffer(rctx->in_sg, sg_nents(rctx->in_sg),
+ sg_dma_len(rctx->in_sg) - cryp->total_in,
+ cryp->total_in);
- rctx = ctx->rctx;
+ ctx->rctx = rctx;
+
+ ret = starfive_aes_hw_init(ctx);
+ if (ret)
+ return ret;
if (!cryp->assoclen)
goto write_text;
if ((cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_CCM)
- cryp->err = starfive_aes_ccm_write_adata(ctx);
+ ret = starfive_aes_ccm_write_adata(ctx);
else
- cryp->err = starfive_aes_gcm_write_adata(ctx);
+ ret = starfive_aes_gcm_write_adata(ctx);
kfree(rctx->adata);
- if (cryp->err)
- return cryp->err;
+ if (ret)
+ return ret;
write_text:
if (!cryp->total_in)
goto finish_req;
- /*
- * Write first plain/ciphertext block to start the module
- * then let irq tasklet handle the rest of the data blocks.
- */
- scatterwalk_copychunks(block, &cryp->in_walk, min_t(size_t, AES_BLOCK_SIZE,
- cryp->total_in), 0);
- cryp->total_in -= min_t(size_t, AES_BLOCK_SIZE, cryp->total_in);
-
- for (i = 0; i < AES_BLOCK_32; i++)
- writel(block[i], cryp->base + STARFIVE_AES_AESDIO0R);
-
- stat = readl(cryp->base + STARFIVE_IE_MASK_OFFSET);
- stat &= ~STARFIVE_IE_MASK_AES_DONE;
- writel(stat, cryp->base + STARFIVE_IE_MASK_OFFSET);
+ starfive_aes_dma_init(cryp);
- return 0;
+ ret = starfive_aes_map_sg(cryp, rctx->in_sg, rctx->out_sg);
+ if (ret)
+ return ret;
finish_req:
- starfive_aes_finish_req(cryp);
+ starfive_aes_finish_req(ctx);
return 0;
}
-static int starfive_aes_aead_init_tfm(struct crypto_aead *tfm)
+static int starfive_aes_aead_init_tfm(struct crypto_aead *tfm,
+ const char *alg_name)
{
struct starfive_cryp_ctx *ctx = crypto_aead_ctx(tfm);
- struct starfive_cryp_dev *cryp = ctx->cryp;
- struct crypto_tfm *aead = crypto_aead_tfm(tfm);
- struct crypto_alg *alg = aead->__crt_alg;
ctx->cryp = starfive_cryp_find_dev(ctx);
if (!ctx->cryp)
return -ENODEV;
- if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
- ctx->aead_fbk = crypto_alloc_aead(alg->cra_name, 0,
- CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(ctx->aead_fbk))
- return dev_err_probe(cryp->dev, PTR_ERR(ctx->aead_fbk),
- "%s() failed to allocate fallback for %s\n",
- __func__, alg->cra_name);
- }
+ ctx->aead_fbk = crypto_alloc_aead(alg_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->aead_fbk))
+ return dev_err_probe(ctx->cryp->dev, PTR_ERR(ctx->aead_fbk),
+ "%s() failed to allocate fallback for %s\n",
+ __func__, alg_name);
- crypto_aead_set_reqsize(tfm, sizeof(struct starfive_cryp_ctx) +
- sizeof(struct aead_request));
+ crypto_aead_set_reqsize(tfm, sizeof(struct starfive_cryp_request_ctx) +
+ crypto_aead_reqsize(ctx->aead_fbk));
return 0;
}
@@ -646,6 +745,46 @@ static void starfive_aes_aead_exit_tfm(struct crypto_aead *tfm)
crypto_free_aead(ctx->aead_fbk);
}
+static bool starfive_aes_check_unaligned(struct starfive_cryp_dev *cryp,
+ struct scatterlist *src,
+ struct scatterlist *dst)
+{
+ struct scatterlist *tsg;
+ int i;
+
+ for_each_sg(src, tsg, sg_nents(src), i)
+ if (!IS_ALIGNED(tsg->offset, sizeof(u32)) ||
+ (!IS_ALIGNED(tsg->length, AES_BLOCK_SIZE) &&
+ !sg_is_last(tsg)))
+ return true;
+
+ if (src != dst)
+ for_each_sg(dst, tsg, sg_nents(dst), i)
+ if (!IS_ALIGNED(tsg->offset, sizeof(u32)) ||
+ (!IS_ALIGNED(tsg->length, AES_BLOCK_SIZE) &&
+ !sg_is_last(tsg)))
+ return true;
+
+ return false;
+}
+
+static int starfive_aes_do_fallback(struct skcipher_request *req, bool enc)
+{
+ struct starfive_cryp_ctx *ctx =
+ crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+ struct skcipher_request *subreq = skcipher_request_ctx(req);
+
+ skcipher_request_set_tfm(subreq, ctx->skcipher_fbk);
+ skcipher_request_set_callback(subreq, req->base.flags,
+ req->base.complete,
+ req->base.data);
+ skcipher_request_set_crypt(subreq, req->src, req->dst,
+ req->cryptlen, req->iv);
+
+ return enc ? crypto_skcipher_encrypt(subreq) :
+ crypto_skcipher_decrypt(subreq);
+}
+
static int starfive_aes_crypt(struct skcipher_request *req, unsigned long flags)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
@@ -660,32 +799,54 @@ static int starfive_aes_crypt(struct skcipher_request *req, unsigned long flags)
if (req->cryptlen & blocksize_align)
return -EINVAL;
+ if (starfive_aes_check_unaligned(cryp, req->src, req->dst))
+ return starfive_aes_do_fallback(req, is_encrypt(cryp));
+
return crypto_transfer_skcipher_request_to_engine(cryp->engine, req);
}
+static int starfive_aes_aead_do_fallback(struct aead_request *req, bool enc)
+{
+ struct starfive_cryp_ctx *ctx =
+ crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct aead_request *subreq = aead_request_ctx(req);
+
+ aead_request_set_tfm(subreq, ctx->aead_fbk);
+ aead_request_set_callback(subreq, req->base.flags,
+ req->base.complete,
+ req->base.data);
+ aead_request_set_crypt(subreq, req->src, req->dst,
+ req->cryptlen, req->iv);
+ aead_request_set_ad(subreq, req->assoclen);
+
+ return enc ? crypto_aead_encrypt(subreq) :
+ crypto_aead_decrypt(subreq);
+}
+
static int starfive_aes_aead_crypt(struct aead_request *req, unsigned long flags)
{
struct starfive_cryp_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
struct starfive_cryp_dev *cryp = ctx->cryp;
+ struct scatterlist *src, *dst, _src[2], _dst[2];
cryp->flags = flags;
- /*
- * HW engine could not perform CCM tag verification on
- * non-blocksize aligned text, use fallback algo instead
+ /* aes-ccm does not support tag verification for non-aligned text,
+ * use fallback for ccm decryption instead.
*/
- if (ctx->aead_fbk && !is_encrypt(cryp)) {
- struct aead_request *subreq = aead_request_ctx(req);
+ if (((cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_CCM) &&
+ !is_encrypt(cryp))
+ return starfive_aes_aead_do_fallback(req, 0);
- aead_request_set_tfm(subreq, ctx->aead_fbk);
- aead_request_set_callback(subreq, req->base.flags,
- req->base.complete, req->base.data);
- aead_request_set_crypt(subreq, req->src,
- req->dst, req->cryptlen, req->iv);
- aead_request_set_ad(subreq, req->assoclen);
+ src = scatterwalk_ffwd(_src, req->src, req->assoclen);
- return crypto_aead_decrypt(subreq);
- }
+ if (req->src == req->dst)
+ dst = src;
+ else
+ dst = scatterwalk_ffwd(_dst, req->dst, req->assoclen);
+
+ if (starfive_aes_check_unaligned(cryp, src, dst))
+ return starfive_aes_aead_do_fallback(req, is_encrypt(cryp));
return crypto_transfer_aead_request_to_engine(cryp->engine, req);
}
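
The crypt entry points above use scatterwalk_ffwd() to skip the associated data so the hardware only sees the text to be processed. A minimal sketch of that helper's contract follows; demo_skip_assoc and tmp are hypothetical names, not part of the driver:

#include <crypto/scatterwalk.h>
#include <linux/scatterlist.h>

/* Hypothetical example: return a scatterlist view that starts
 * 'assoclen' bytes into 'src'. scatterwalk_ffwd() may return 'src'
 * itself or chain through 'tmp', so 'tmp' must outlive the result.
 */
static struct scatterlist *demo_skip_assoc(struct scatterlist tmp[2],
					   struct scatterlist *src,
					   unsigned int assoclen)
{
	return scatterwalk_ffwd(tmp, src, assoclen);
}
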
@@ -706,7 +867,7 @@ static int starfive_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
memcpy(ctx->key, key, keylen);
ctx->keylen = keylen;
- return 0;
+ return crypto_skcipher_setkey(ctx->skcipher_fbk, key, keylen);
}
static int starfive_aes_aead_setkey(struct crypto_aead *tfm, const u8 *key,
@@ -725,16 +886,20 @@ static int starfive_aes_aead_setkey(struct crypto_aead *tfm, const u8 *key,
memcpy(ctx->key, key, keylen);
ctx->keylen = keylen;
- if (ctx->aead_fbk)
- return crypto_aead_setkey(ctx->aead_fbk, key, keylen);
-
- return 0;
+ return crypto_aead_setkey(ctx->aead_fbk, key, keylen);
}
static int starfive_aes_gcm_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
- return crypto_gcm_check_authsize(authsize);
+ struct starfive_cryp_ctx *ctx = crypto_aead_ctx(tfm);
+ int ret;
+
+ ret = crypto_gcm_check_authsize(authsize);
+ if (ret)
+ return ret;
+
+ return crypto_aead_setauthsize(ctx->aead_fbk, authsize);
}
static int starfive_aes_ccm_setauthsize(struct crypto_aead *tfm,
@@ -820,9 +985,35 @@ static int starfive_aes_ccm_decrypt(struct aead_request *req)
return starfive_aes_aead_crypt(req, STARFIVE_AES_MODE_CCM);
}
+static int starfive_aes_ecb_init_tfm(struct crypto_skcipher *tfm)
+{
+ return starfive_aes_init_tfm(tfm, "ecb(aes-generic)");
+}
+
+static int starfive_aes_cbc_init_tfm(struct crypto_skcipher *tfm)
+{
+ return starfive_aes_init_tfm(tfm, "cbc(aes-generic)");
+}
+
+static int starfive_aes_ctr_init_tfm(struct crypto_skcipher *tfm)
+{
+ return starfive_aes_init_tfm(tfm, "ctr(aes-generic)");
+}
+
+static int starfive_aes_ccm_init_tfm(struct crypto_aead *tfm)
+{
+ return starfive_aes_aead_init_tfm(tfm, "ccm_base(ctr(aes-generic),cbcmac(aes-generic))");
+}
+
+static int starfive_aes_gcm_init_tfm(struct crypto_aead *tfm)
+{
+ return starfive_aes_aead_init_tfm(tfm, "gcm_base(ctr(aes-generic),ghash-generic)");
+}
+
static struct skcipher_engine_alg skcipher_algs[] = {
{
- .base.init = starfive_aes_init_tfm,
+ .base.init = starfive_aes_ecb_init_tfm,
+ .base.exit = starfive_aes_exit_tfm,
.base.setkey = starfive_aes_setkey,
.base.encrypt = starfive_aes_ecb_encrypt,
.base.decrypt = starfive_aes_ecb_decrypt,
@@ -832,7 +1023,8 @@ static struct skcipher_engine_alg skcipher_algs[] = {
.cra_name = "ecb(aes)",
.cra_driver_name = "starfive-ecb-aes",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct starfive_cryp_ctx),
.cra_alignmask = 0xf,
@@ -842,7 +1034,8 @@ static struct skcipher_engine_alg skcipher_algs[] = {
.do_one_request = starfive_aes_do_one_req,
},
}, {
- .base.init = starfive_aes_init_tfm,
+ .base.init = starfive_aes_cbc_init_tfm,
+ .base.exit = starfive_aes_exit_tfm,
.base.setkey = starfive_aes_setkey,
.base.encrypt = starfive_aes_cbc_encrypt,
.base.decrypt = starfive_aes_cbc_decrypt,
@@ -853,7 +1046,8 @@ static struct skcipher_engine_alg skcipher_algs[] = {
.cra_name = "cbc(aes)",
.cra_driver_name = "starfive-cbc-aes",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct starfive_cryp_ctx),
.cra_alignmask = 0xf,
@@ -863,7 +1057,8 @@ static struct skcipher_engine_alg skcipher_algs[] = {
.do_one_request = starfive_aes_do_one_req,
},
}, {
- .base.init = starfive_aes_init_tfm,
+ .base.init = starfive_aes_ctr_init_tfm,
+ .base.exit = starfive_aes_exit_tfm,
.base.setkey = starfive_aes_setkey,
.base.encrypt = starfive_aes_ctr_encrypt,
.base.decrypt = starfive_aes_ctr_decrypt,
@@ -874,7 +1069,8 @@ static struct skcipher_engine_alg skcipher_algs[] = {
.cra_name = "ctr(aes)",
.cra_driver_name = "starfive-ctr-aes",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct starfive_cryp_ctx),
.cra_alignmask = 0xf,
@@ -892,7 +1088,7 @@ static struct aead_engine_alg aead_algs[] = {
.base.setauthsize = starfive_aes_gcm_setauthsize,
.base.encrypt = starfive_aes_gcm_encrypt,
.base.decrypt = starfive_aes_gcm_decrypt,
- .base.init = starfive_aes_aead_init_tfm,
+ .base.init = starfive_aes_gcm_init_tfm,
.base.exit = starfive_aes_aead_exit_tfm,
.base.ivsize = GCM_AES_IV_SIZE,
.base.maxauthsize = AES_BLOCK_SIZE,
@@ -900,7 +1096,8 @@ static struct aead_engine_alg aead_algs[] = {
.cra_name = "gcm(aes)",
.cra_driver_name = "starfive-gcm-aes",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct starfive_cryp_ctx),
.cra_alignmask = 0xf,
@@ -914,7 +1111,7 @@ static struct aead_engine_alg aead_algs[] = {
.base.setauthsize = starfive_aes_ccm_setauthsize,
.base.encrypt = starfive_aes_ccm_encrypt,
.base.decrypt = starfive_aes_ccm_decrypt,
- .base.init = starfive_aes_aead_init_tfm,
+ .base.init = starfive_aes_ccm_init_tfm,
.base.exit = starfive_aes_aead_exit_tfm,
.base.ivsize = AES_BLOCK_SIZE,
.base.maxauthsize = AES_BLOCK_SIZE,
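
The AES data path above replaces the old interrupt/tasklet flow with dmaengine slave transfers. The core pattern (prepare a descriptor, submit it, wait on a completion with a bounded timeout) is sketched below, assuming the caller has already DMA-mapped a single-entry scatterlist; demo_dma_done and demo_dma_tx are hypothetical names:

#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>

static void demo_dma_done(void *param)		/* hypothetical callback */
{
	complete((struct completion *)param);
}

static int demo_dma_tx(struct dma_chan *chan, struct scatterlist *sg,
		       struct completion *done)	/* hypothetical helper */
{
	struct dma_async_tx_descriptor *desc;

	/* 'sg' must already be DMA-mapped (dma_map_sg()) by the caller. */
	desc = dmaengine_prep_slave_sg(chan, sg, 1, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	desc->callback = demo_dma_done;
	desc->callback_param = done;

	reinit_completion(done);
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	/* Bounded wait, mirroring the 1 s timeout used in the patch. */
	if (!wait_for_completion_timeout(done, msecs_to_jiffies(1000)))
		return -ETIMEDOUT;

	return 0;
}
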
diff --git a/drivers/crypto/starfive/jh7110-cryp.c b/drivers/crypto/starfive/jh7110-cryp.c
index 425fddf3a8ab..e4dfed7ee0b0 100644
--- a/drivers/crypto/starfive/jh7110-cryp.c
+++ b/drivers/crypto/starfive/jh7110-cryp.c
@@ -89,34 +89,10 @@ static void starfive_dma_cleanup(struct starfive_cryp_dev *cryp)
dma_release_channel(cryp->rx);
}
-static irqreturn_t starfive_cryp_irq(int irq, void *priv)
-{
- u32 status;
- u32 mask;
- struct starfive_cryp_dev *cryp = (struct starfive_cryp_dev *)priv;
-
- mask = readl(cryp->base + STARFIVE_IE_MASK_OFFSET);
- status = readl(cryp->base + STARFIVE_IE_FLAG_OFFSET);
- if (status & STARFIVE_IE_FLAG_AES_DONE) {
- mask |= STARFIVE_IE_MASK_AES_DONE;
- writel(mask, cryp->base + STARFIVE_IE_MASK_OFFSET);
- tasklet_schedule(&cryp->aes_done);
- }
-
- if (status & STARFIVE_IE_FLAG_HASH_DONE) {
- mask |= STARFIVE_IE_MASK_HASH_DONE;
- writel(mask, cryp->base + STARFIVE_IE_MASK_OFFSET);
- tasklet_schedule(&cryp->hash_done);
- }
-
- return IRQ_HANDLED;
-}
-
static int starfive_cryp_probe(struct platform_device *pdev)
{
struct starfive_cryp_dev *cryp;
struct resource *res;
- int irq;
int ret;
cryp = devm_kzalloc(&pdev->dev, sizeof(*cryp), GFP_KERNEL);
@@ -131,9 +107,6 @@ static int starfive_cryp_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, PTR_ERR(cryp->base),
"Error remapping memory for platform device\n");
- tasklet_init(&cryp->aes_done, starfive_aes_done_task, (unsigned long)cryp);
- tasklet_init(&cryp->hash_done, starfive_hash_done_task, (unsigned long)cryp);
-
cryp->phys_base = res->start;
cryp->dma_maxburst = 32;
cryp->side_chan = side_chan;
@@ -153,16 +126,6 @@ static int starfive_cryp_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, PTR_ERR(cryp->rst),
"Error getting hardware reset line\n");
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
- ret = devm_request_irq(&pdev->dev, irq, starfive_cryp_irq, 0, pdev->name,
- (void *)cryp);
- if (ret)
- return dev_err_probe(&pdev->dev, ret,
- "Failed to register interrupt handler\n");
-
clk_prepare_enable(cryp->hclk);
clk_prepare_enable(cryp->ahb);
reset_control_deassert(cryp->rst);
@@ -219,9 +182,6 @@ err_dma_init:
clk_disable_unprepare(cryp->ahb);
reset_control_assert(cryp->rst);
- tasklet_kill(&cryp->aes_done);
- tasklet_kill(&cryp->hash_done);
-
return ret;
}
@@ -233,9 +193,6 @@ static void starfive_cryp_remove(struct platform_device *pdev)
starfive_hash_unregister_algs();
starfive_rsa_unregister_algs();
- tasklet_kill(&cryp->aes_done);
- tasklet_kill(&cryp->hash_done);
-
crypto_engine_stop(cryp->engine);
crypto_engine_exit(cryp->engine);
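
Throughout this series, error paths collapse the log-and-return pair into a single dev_err_probe() call. A minimal sketch of the idiom, with a hypothetical helper:

#include <linux/device.h>
#include <linux/errno.h>

/* Hypothetical example: dev_err_probe() logs the message (silently
 * deferring for -EPROBE_DEFER) and returns the error code, so the
 * call site stays a one-liner.
 */
static int demo_get_resource(struct device *dev, void *res)
{
	if (!res)
		return dev_err_probe(dev, -ENOMEM, "resource missing\n");

	return 0;
}
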
diff --git a/drivers/crypto/starfive/jh7110-cryp.h b/drivers/crypto/starfive/jh7110-cryp.h
index 6cdf6db5d904..494a74f52706 100644
--- a/drivers/crypto/starfive/jh7110-cryp.h
+++ b/drivers/crypto/starfive/jh7110-cryp.h
@@ -91,6 +91,7 @@ union starfive_hash_csr {
#define STARFIVE_HASH_KEY_DONE BIT(13)
u32 key_done :1;
u32 key_flag :1;
+#define STARFIVE_HASH_HMAC_DONE BIT(15)
u32 hmac_done :1;
#define STARFIVE_HASH_BUSY BIT(16)
u32 busy :1;
@@ -168,6 +169,7 @@ struct starfive_cryp_ctx {
struct crypto_akcipher *akcipher_fbk;
struct crypto_ahash *ahash_fbk;
struct crypto_aead *aead_fbk;
+ struct crypto_skcipher *skcipher_fbk;
};
struct starfive_cryp_dev {
@@ -185,11 +187,8 @@ struct starfive_cryp_dev {
struct dma_chan *rx;
struct dma_slave_config cfg_in;
struct dma_slave_config cfg_out;
- struct scatter_walk in_walk;
- struct scatter_walk out_walk;
struct crypto_engine *engine;
- struct tasklet_struct aes_done;
- struct tasklet_struct hash_done;
+ struct completion dma_done;
size_t assoclen;
size_t total_in;
size_t total_out;
@@ -236,7 +235,4 @@ void starfive_rsa_unregister_algs(void);
int starfive_aes_register_algs(void);
void starfive_aes_unregister_algs(void);
-
-void starfive_hash_done_task(unsigned long param);
-void starfive_aes_done_task(unsigned long param);
#endif
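
The header pairs each status bitfield with a BIT() mask (e.g. STARFIVE_HASH_HMAC_DONE above) so a register value can be composed field-by-field yet tested against a raw readl() result. A cut-down sketch of the idiom, with a hypothetical layout:

#include <linux/bits.h>
#include <linux/types.h>

/* Hypothetical layout: bitfields build the value, BIT() masks test it. */
union demo_csr {
	u32 v;
	struct {
		u32 start	:1;	/* bit 0 */
		u32 rsvd0	:14;
#define DEMO_CSR_HMAC_DONE	BIT(15)
		u32 hmac_done	:1;	/* bit 15 */
#define DEMO_CSR_BUSY		BIT(16)
		u32 busy	:1;	/* bit 16 */
		u32 rsvd1	:15;
	};
};
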
diff --git a/drivers/crypto/starfive/jh7110-hash.c b/drivers/crypto/starfive/jh7110-hash.c
index b6d1808012ca..2c60a1047bc3 100644
--- a/drivers/crypto/starfive/jh7110-hash.c
+++ b/drivers/crypto/starfive/jh7110-hash.c
@@ -36,15 +36,22 @@
#define STARFIVE_HASH_BUFLEN SHA512_BLOCK_SIZE
#define STARFIVE_HASH_RESET 0x2
-static inline int starfive_hash_wait_busy(struct starfive_cryp_ctx *ctx)
+static inline int starfive_hash_wait_busy(struct starfive_cryp_dev *cryp)
{
- struct starfive_cryp_dev *cryp = ctx->cryp;
u32 status;
return readl_relaxed_poll_timeout(cryp->base + STARFIVE_HASH_SHACSR, status,
!(status & STARFIVE_HASH_BUSY), 10, 100000);
}
+static inline int starfive_hash_wait_hmac_done(struct starfive_cryp_dev *cryp)
+{
+ u32 status;
+
+ return readl_relaxed_poll_timeout(cryp->base + STARFIVE_HASH_SHACSR, status,
+ (status & STARFIVE_HASH_HMAC_DONE), 10, 100000);
+}
+
static inline int starfive_hash_wait_key_done(struct starfive_cryp_ctx *ctx)
{
struct starfive_cryp_dev *cryp = ctx->cryp;
@@ -84,64 +91,26 @@ static int starfive_hash_hmac_key(struct starfive_cryp_ctx *ctx)
return 0;
}
-static void starfive_hash_start(void *param)
+static void starfive_hash_start(struct starfive_cryp_dev *cryp)
{
- struct starfive_cryp_ctx *ctx = param;
- struct starfive_cryp_request_ctx *rctx = ctx->rctx;
- struct starfive_cryp_dev *cryp = ctx->cryp;
- union starfive_alg_cr alg_cr;
union starfive_hash_csr csr;
- u32 stat;
-
- dma_unmap_sg(cryp->dev, rctx->in_sg, rctx->in_sg_len, DMA_TO_DEVICE);
-
- alg_cr.v = 0;
- alg_cr.clear = 1;
-
- writel(alg_cr.v, cryp->base + STARFIVE_ALG_CR_OFFSET);
csr.v = readl(cryp->base + STARFIVE_HASH_SHACSR);
csr.firstb = 0;
csr.final = 1;
-
- stat = readl(cryp->base + STARFIVE_IE_MASK_OFFSET);
- stat &= ~STARFIVE_IE_MASK_HASH_DONE;
- writel(stat, cryp->base + STARFIVE_IE_MASK_OFFSET);
writel(csr.v, cryp->base + STARFIVE_HASH_SHACSR);
}
-static int starfive_hash_xmit_dma(struct starfive_cryp_ctx *ctx)
+static void starfive_hash_dma_callback(void *param)
{
- struct starfive_cryp_request_ctx *rctx = ctx->rctx;
- struct starfive_cryp_dev *cryp = ctx->cryp;
- struct dma_async_tx_descriptor *in_desc;
- union starfive_alg_cr alg_cr;
- int total_len;
- int ret;
-
- if (!rctx->total) {
- starfive_hash_start(ctx);
- return 0;
- }
+ struct starfive_cryp_dev *cryp = param;
- writel(rctx->total, cryp->base + STARFIVE_DMA_IN_LEN_OFFSET);
-
- total_len = rctx->total;
- total_len = (total_len & 0x3) ? (((total_len >> 2) + 1) << 2) : total_len;
- sg_dma_len(rctx->in_sg) = total_len;
-
- alg_cr.v = 0;
- alg_cr.start = 1;
- alg_cr.hash_dma_en = 1;
-
- writel(alg_cr.v, cryp->base + STARFIVE_ALG_CR_OFFSET);
-
- ret = dma_map_sg(cryp->dev, rctx->in_sg, rctx->in_sg_len, DMA_TO_DEVICE);
- if (!ret)
- return dev_err_probe(cryp->dev, -EINVAL, "dma_map_sg() error\n");
+ complete(&cryp->dma_done);
+}
- cryp->cfg_in.direction = DMA_MEM_TO_DEV;
- cryp->cfg_in.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+static void starfive_hash_dma_init(struct starfive_cryp_dev *cryp)
+{
+ cryp->cfg_in.src_addr_width = DMA_SLAVE_BUSWIDTH_16_BYTES;
cryp->cfg_in.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
cryp->cfg_in.src_maxburst = cryp->dma_maxburst;
cryp->cfg_in.dst_maxburst = cryp->dma_maxburst;
@@ -149,50 +118,48 @@ static int starfive_hash_xmit_dma(struct starfive_cryp_ctx *ctx)
dmaengine_slave_config(cryp->tx, &cryp->cfg_in);
- in_desc = dmaengine_prep_slave_sg(cryp->tx, rctx->in_sg,
- ret, DMA_MEM_TO_DEV,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-
- if (!in_desc)
- return -EINVAL;
-
- in_desc->callback = starfive_hash_start;
- in_desc->callback_param = ctx;
-
- dmaengine_submit(in_desc);
- dma_async_issue_pending(cryp->tx);
-
- return 0;
+ init_completion(&cryp->dma_done);
}
-static int starfive_hash_xmit(struct starfive_cryp_ctx *ctx)
+static int starfive_hash_dma_xfer(struct starfive_cryp_dev *cryp,
+ struct scatterlist *sg)
{
- struct starfive_cryp_request_ctx *rctx = ctx->rctx;
- struct starfive_cryp_dev *cryp = ctx->cryp;
+ struct dma_async_tx_descriptor *in_desc;
+ union starfive_alg_cr alg_cr;
int ret = 0;
- rctx->csr.hash.v = 0;
- rctx->csr.hash.reset = 1;
- writel(rctx->csr.hash.v, cryp->base + STARFIVE_HASH_SHACSR);
-
- if (starfive_hash_wait_busy(ctx))
- return dev_err_probe(cryp->dev, -ETIMEDOUT, "Error resetting engine.\n");
+ alg_cr.v = 0;
+ alg_cr.start = 1;
+ alg_cr.hash_dma_en = 1;
+ writel(alg_cr.v, cryp->base + STARFIVE_ALG_CR_OFFSET);
- rctx->csr.hash.v = 0;
- rctx->csr.hash.mode = ctx->hash_mode;
- rctx->csr.hash.ie = 1;
+ writel(sg_dma_len(sg), cryp->base + STARFIVE_DMA_IN_LEN_OFFSET);
+ sg_dma_len(sg) = ALIGN(sg_dma_len(sg), sizeof(u32));
- if (ctx->is_hmac) {
- ret = starfive_hash_hmac_key(ctx);
- if (ret)
- return ret;
- } else {
- rctx->csr.hash.start = 1;
- rctx->csr.hash.firstb = 1;
- writel(rctx->csr.hash.v, cryp->base + STARFIVE_HASH_SHACSR);
+ in_desc = dmaengine_prep_slave_sg(cryp->tx, sg, 1, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!in_desc) {
+ ret = -EINVAL;
+ goto end;
}
- return starfive_hash_xmit_dma(ctx);
+ reinit_completion(&cryp->dma_done);
+ in_desc->callback = starfive_hash_dma_callback;
+ in_desc->callback_param = cryp;
+
+ dmaengine_submit(in_desc);
+ dma_async_issue_pending(cryp->tx);
+
+ if (!wait_for_completion_timeout(&cryp->dma_done,
+ msecs_to_jiffies(1000)))
+ ret = -ETIMEDOUT;
+
+end:
+ alg_cr.v = 0;
+ alg_cr.clear = 1;
+ writel(alg_cr.v, cryp->base + STARFIVE_ALG_CR_OFFSET);
+
+ return ret;
}
static int starfive_hash_copy_hash(struct ahash_request *req)
@@ -215,58 +182,74 @@ static int starfive_hash_copy_hash(struct ahash_request *req)
return 0;
}
-void starfive_hash_done_task(unsigned long param)
+static void starfive_hash_done_task(struct starfive_cryp_dev *cryp)
{
- struct starfive_cryp_dev *cryp = (struct starfive_cryp_dev *)param;
int err = cryp->err;
if (!err)
err = starfive_hash_copy_hash(cryp->req.hreq);
- /* Reset to clear hash_done in irq register*/
- writel(STARFIVE_HASH_RESET, cryp->base + STARFIVE_HASH_SHACSR);
-
crypto_finalize_hash_request(cryp->engine, cryp->req.hreq, err);
}
-static int starfive_hash_check_aligned(struct scatterlist *sg, size_t total, size_t align)
+static int starfive_hash_one_request(struct crypto_engine *engine, void *areq)
{
- int len = 0;
+ struct ahash_request *req = container_of(areq, struct ahash_request,
+ base);
+ struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ struct starfive_cryp_request_ctx *rctx = ctx->rctx;
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ struct scatterlist *tsg;
+ int ret, src_nents, i;
- if (!total)
- return 0;
+ writel(STARFIVE_HASH_RESET, cryp->base + STARFIVE_HASH_SHACSR);
- if (!IS_ALIGNED(total, align))
- return -EINVAL;
+ if (starfive_hash_wait_busy(cryp))
+ return dev_err_probe(cryp->dev, -ETIMEDOUT, "Error resetting hardware\n");
- while (sg) {
- if (!IS_ALIGNED(sg->offset, sizeof(u32)))
- return -EINVAL;
+ rctx->csr.hash.v = 0;
+ rctx->csr.hash.mode = ctx->hash_mode;
- if (!IS_ALIGNED(sg->length, align))
- return -EINVAL;
+ if (ctx->is_hmac) {
+ ret = starfive_hash_hmac_key(ctx);
+ if (ret)
+ return ret;
+ } else {
+ rctx->csr.hash.start = 1;
+ rctx->csr.hash.firstb = 1;
+ writel(rctx->csr.hash.v, cryp->base + STARFIVE_HASH_SHACSR);
+ }
+
+ /* No input message, get digest and end. */
+ if (!rctx->total)
+ goto hash_start;
+
+ starfive_hash_dma_init(cryp);
+
+ for_each_sg(rctx->in_sg, tsg, rctx->in_sg_len, i) {
+ src_nents = dma_map_sg(cryp->dev, tsg, 1, DMA_TO_DEVICE);
+ if (src_nents == 0)
+ return dev_err_probe(cryp->dev, -ENOMEM,
+ "dma_map_sg error\n");
- len += sg->length;
- sg = sg_next(sg);
+ ret = starfive_hash_dma_xfer(cryp, tsg);
+ dma_unmap_sg(cryp->dev, tsg, 1, DMA_TO_DEVICE);
+ if (ret)
+ return ret;
}
- if (len != total)
- return -EINVAL;
+hash_start:
+ starfive_hash_start(cryp);
- return 0;
-}
+ if (starfive_hash_wait_busy(cryp))
+ return dev_err_probe(cryp->dev, -ETIMEDOUT, "Error generating digest\n");
-static int starfive_hash_one_request(struct crypto_engine *engine, void *areq)
-{
- struct ahash_request *req = container_of(areq, struct ahash_request,
- base);
- struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
- struct starfive_cryp_dev *cryp = ctx->cryp;
+ if (ctx->is_hmac)
+ cryp->err = starfive_hash_wait_hmac_done(cryp);
- if (!cryp)
- return -ENODEV;
+ starfive_hash_done_task(cryp);
- return starfive_hash_xmit(ctx);
+ return 0;
}
static int starfive_hash_init(struct ahash_request *req)
@@ -337,22 +320,6 @@ static int starfive_hash_finup(struct ahash_request *req)
return crypto_ahash_finup(&rctx->ahash_fbk_req);
}
-static int starfive_hash_digest_fb(struct ahash_request *req)
-{
- struct starfive_cryp_request_ctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ahash_request_set_tfm(&rctx->ahash_fbk_req, ctx->ahash_fbk);
- ahash_request_set_callback(&rctx->ahash_fbk_req, req->base.flags,
- req->base.complete, req->base.data);
-
- ahash_request_set_crypt(&rctx->ahash_fbk_req, req->src,
- req->result, req->nbytes);
-
- return crypto_ahash_digest(&rctx->ahash_fbk_req);
-}
-
static int starfive_hash_digest(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -370,9 +337,6 @@ static int starfive_hash_digest(struct ahash_request *req)
rctx->in_sg_len = sg_nents_for_len(rctx->in_sg, rctx->total);
ctx->rctx = rctx;
- if (starfive_hash_check_aligned(rctx->in_sg, rctx->total, rctx->blksize))
- return starfive_hash_digest_fb(req);
-
return crypto_transfer_hash_request_to_engine(cryp->engine, req);
}
@@ -406,7 +370,8 @@ static int starfive_hash_import(struct ahash_request *req, const void *in)
static int starfive_hash_init_tfm(struct crypto_ahash *hash,
const char *alg_name,
- unsigned int mode)
+ unsigned int mode,
+ bool is_hmac)
{
struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash);
@@ -426,7 +391,7 @@ static int starfive_hash_init_tfm(struct crypto_ahash *hash,
crypto_ahash_set_reqsize(hash, sizeof(struct starfive_cryp_request_ctx) +
crypto_ahash_reqsize(ctx->ahash_fbk));
- ctx->keylen = 0;
+ ctx->is_hmac = is_hmac;
ctx->hash_mode = mode;
return 0;
@@ -529,81 +494,61 @@ static int starfive_hash_setkey(struct crypto_ahash *hash,
static int starfive_sha224_init_tfm(struct crypto_ahash *hash)
{
return starfive_hash_init_tfm(hash, "sha224-generic",
- STARFIVE_HASH_SHA224);
+ STARFIVE_HASH_SHA224, 0);
}
static int starfive_sha256_init_tfm(struct crypto_ahash *hash)
{
return starfive_hash_init_tfm(hash, "sha256-generic",
- STARFIVE_HASH_SHA256);
+ STARFIVE_HASH_SHA256, 0);
}
static int starfive_sha384_init_tfm(struct crypto_ahash *hash)
{
return starfive_hash_init_tfm(hash, "sha384-generic",
- STARFIVE_HASH_SHA384);
+ STARFIVE_HASH_SHA384, 0);
}
static int starfive_sha512_init_tfm(struct crypto_ahash *hash)
{
return starfive_hash_init_tfm(hash, "sha512-generic",
- STARFIVE_HASH_SHA512);
+ STARFIVE_HASH_SHA512, 0);
}
static int starfive_sm3_init_tfm(struct crypto_ahash *hash)
{
return starfive_hash_init_tfm(hash, "sm3-generic",
- STARFIVE_HASH_SM3);
+ STARFIVE_HASH_SM3, 0);
}
static int starfive_hmac_sha224_init_tfm(struct crypto_ahash *hash)
{
- struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash);
-
- ctx->is_hmac = true;
-
return starfive_hash_init_tfm(hash, "hmac(sha224-generic)",
- STARFIVE_HASH_SHA224);
+ STARFIVE_HASH_SHA224, 1);
}
static int starfive_hmac_sha256_init_tfm(struct crypto_ahash *hash)
{
- struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash);
-
- ctx->is_hmac = true;
-
return starfive_hash_init_tfm(hash, "hmac(sha256-generic)",
- STARFIVE_HASH_SHA256);
+ STARFIVE_HASH_SHA256, 1);
}
static int starfive_hmac_sha384_init_tfm(struct crypto_ahash *hash)
{
- struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash);
-
- ctx->is_hmac = true;
-
return starfive_hash_init_tfm(hash, "hmac(sha384-generic)",
- STARFIVE_HASH_SHA384);
+ STARFIVE_HASH_SHA384, 1);
}
static int starfive_hmac_sha512_init_tfm(struct crypto_ahash *hash)
{
- struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash);
-
- ctx->is_hmac = true;
-
return starfive_hash_init_tfm(hash, "hmac(sha512-generic)",
- STARFIVE_HASH_SHA512);
+ STARFIVE_HASH_SHA512, 1);
}
static int starfive_hmac_sm3_init_tfm(struct crypto_ahash *hash)
{
- struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash);
-
- ctx->is_hmac = true;
-
return starfive_hash_init_tfm(hash, "hmac(sm3-generic)",
- STARFIVE_HASH_SM3);
+ STARFIVE_HASH_SM3, 1);
}
static struct ahash_engine_alg algs_sha2_sm3[] = {
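
With the interrupt handler gone, the hash path waits on status bits by polling, as in starfive_hash_wait_busy()/starfive_hash_wait_hmac_done() above. A minimal sketch of the readl_relaxed_poll_timeout() pattern, with hypothetical register and bit names:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define DEMO_CSR	0x0	/* hypothetical register offset */
#define DEMO_BUSY	BIT(16)	/* hypothetical status bit */

/* Poll every 10 us; give up after 100 ms, matching the bounds the
 * driver uses for its busy/done waits.
 */
static int demo_wait_idle(void __iomem *base)
{
	u32 status;

	return readl_relaxed_poll_timeout(base + DEMO_CSR, status,
					  !(status & DEMO_BUSY), 10, 100000);
}
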
diff --git a/drivers/crypto/starfive/jh7110-rsa.c b/drivers/crypto/starfive/jh7110-rsa.c
index cf8bda7f0855..33093ba4b13a 100644
--- a/drivers/crypto/starfive/jh7110-rsa.c
+++ b/drivers/crypto/starfive/jh7110-rsa.c
@@ -45,6 +45,9 @@ static inline int starfive_pka_wait_done(struct starfive_cryp_ctx *ctx)
static void starfive_rsa_free_key(struct starfive_rsa_key *key)
{
+ if (!key->key_sz)
+ return;
+
kfree_sensitive(key->d);
kfree_sensitive(key->e);
kfree_sensitive(key->n);
@@ -273,7 +276,6 @@ static int starfive_rsa_enc_core(struct starfive_cryp_ctx *ctx, int enc)
err_rsa_crypt:
writel(STARFIVE_RSA_RESET, cryp->base + STARFIVE_PKA_CACR_OFFSET);
- kfree(rctx->rsa_data);
return ret;
}
@@ -534,16 +536,14 @@ static int starfive_rsa_init_tfm(struct crypto_akcipher *tfm)
{
struct starfive_cryp_ctx *ctx = akcipher_tfm_ctx(tfm);
+ ctx->cryp = starfive_cryp_find_dev(ctx);
+ if (!ctx->cryp)
+ return -ENODEV;
+
ctx->akcipher_fbk = crypto_alloc_akcipher("rsa-generic", 0, 0);
if (IS_ERR(ctx->akcipher_fbk))
return PTR_ERR(ctx->akcipher_fbk);
- ctx->cryp = starfive_cryp_find_dev(ctx);
- if (!ctx->cryp) {
- crypto_free_akcipher(ctx->akcipher_fbk);
- return -ENODEV;
- }
-
akcipher_set_reqsize(tfm, sizeof(struct starfive_cryp_request_ctx) +
sizeof(struct crypto_akcipher) + 32);
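
A pattern common to the skcipher, aead, and akcipher changes in this series: find the device first, then allocate a software fallback and size the request context so the fallback's subrequest can live inside it. A condensed, hypothetical skcipher sketch (demo_ctx, demo_init_tfm are illustrative names only):

#include <crypto/internal/skcipher.h>
#include <linux/err.h>

struct demo_ctx {			/* hypothetical tfm context */
	struct crypto_skcipher *fbk;
};

static int demo_init_tfm(struct crypto_skcipher *tfm)
{
	struct demo_ctx *ctx = crypto_skcipher_ctx(tfm);

	/* Request an implementation usable as a fallback. */
	ctx->fbk = crypto_alloc_skcipher("cbc(aes-generic)", 0,
					 CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fbk))
		return PTR_ERR(ctx->fbk);

	/* Reserve room for the fallback's subrequest (the driver also
	 * adds its own request-context size here).
	 */
	crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(ctx->fbk));

	return 0;
}

static void demo_exit_tfm(struct crypto_skcipher *tfm)
{
	struct demo_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->fbk);
}
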