Diffstat (limited to 'drivers/crypto/caam/caamalg_qi2.c')
-rw-r--r--  drivers/crypto/caam/caamalg_qi2.c | 325
1 file changed, 206 insertions(+), 119 deletions(-)
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index 06bf32c32cbd..3443f6d6dd83 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -15,6 +15,7 @@
#include "key_gen.h"
#include "caamalg_desc.h"
#include "caamhash_desc.h"
+#include "dpseci-debugfs.h"
#include <linux/fsl/mc.h>
#include <soc/fsl/dpaa2-io.h>
#include <soc/fsl/dpaa2-fd.h>
@@ -198,6 +199,18 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
}
+ /*
+ * In case |user key| > |derived key|, using DKP<imm,imm> would result
+ * in invalid opcodes (last bytes of user key) in the resulting
+ * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
+ * addresses are needed.
+ */
+ ctx->adata.key_virt = ctx->key;
+ ctx->adata.key_dma = ctx->key_dma;
+
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
data_len[0] = ctx->adata.keylen_pad;
data_len[1] = ctx->cdata.keylen;
@@ -209,16 +222,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
ARRAY_SIZE(data_len)) < 0)
return -EINVAL;
- if (inl_mask & 1)
- ctx->adata.key_virt = ctx->key;
- else
- ctx->adata.key_dma = ctx->key_dma;
-
- if (inl_mask & 2)
- ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
- else
- ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
-
ctx->adata.key_inline = !!(inl_mask & 1);
ctx->cdata.key_inline = !!(inl_mask & 2);
@@ -247,16 +250,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
ARRAY_SIZE(data_len)) < 0)
return -EINVAL;
- if (inl_mask & 1)
- ctx->adata.key_virt = ctx->key;
- else
- ctx->adata.key_dma = ctx->key_dma;
-
- if (inl_mask & 2)
- ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
- else
- ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
-
ctx->adata.key_inline = !!(inl_mask & 1);
ctx->cdata.key_inline = !!(inl_mask & 2);
@@ -329,7 +322,6 @@ static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
struct crypto_authenc_keys keys;
- u32 flags;
int err;
err = crypto_authenc_extractkeys(&keys, key, keylen);
@@ -340,14 +332,8 @@ static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
if (keys.enckeylen != DES3_EDE_KEY_SIZE)
goto badkey;
- flags = crypto_aead_get_flags(aead);
- err = __des3_verify_key(&flags, keys.enckey);
- if (unlikely(err)) {
- crypto_aead_set_flags(aead, flags);
- goto out;
- }
-
- err = aead_setkey(aead, key, keylen);
+ err = crypto_des3_ede_verify_key(crypto_aead_tfm(aead), keys.enckey) ?:
+ aead_setkey(aead, key, keylen);
out:
memzero_explicit(&keys, sizeof(keys));
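
The "err ?: next()" form used in the new return statement is GNU C's conditional operator with the middle operand omitted: the left-hand expression is evaluated once and returned if non-zero, otherwise the right-hand call runs. A minimal standalone sketch of the idiom, with hypothetical function names that are not part of the driver:

#include <linux/errno.h>
#include <linux/types.h>

static int check_key_sketch(bool bad)
{
	return bad ? -EINVAL : 0;
}

static int program_key_sketch(void)
{
	return 0;
}

static int setkey_sketch(bool bad)
{
	/* program_key_sketch() runs only when check_key_sketch() returned 0 */
	return check_key_sketch(bad) ?: program_key_sketch();
}
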
@@ -719,6 +705,11 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ int err;
+
+ err = crypto_gcm_check_authsize(authsize);
+ if (err)
+ return err;
ctx->authsize = authsize;
gcm_set_sh_desc(authenc);
@@ -731,7 +722,13 @@ static int gcm_setkey(struct crypto_aead *aead,
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *dev = ctx->dev;
+ int ret;
+ ret = aes_check_keylen(keylen);
+ if (ret) {
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return ret;
+ }
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
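
gcm_setkey() now rejects bad key sizes up front with aes_check_keylen() instead of letting shared-descriptor construction fail later. As far as I understand it, the helper simply whitelists the three AES key sizes; an equivalent-in-effect sketch (illustrative only, the real check lives in <crypto/aes.h>):

#include <crypto/aes.h>
#include <linux/errno.h>

static int aes_check_keylen_sketch(unsigned int keylen)
{
	switch (keylen) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		return 0;
	default:
		return -EINVAL;
	}
}
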
@@ -805,6 +802,11 @@ static int rfc4106_setauthsize(struct crypto_aead *authenc,
unsigned int authsize)
{
struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ int err;
+
+ err = crypto_rfc4106_check_authsize(authsize);
+ if (err)
+ return err;
ctx->authsize = authsize;
rfc4106_set_sh_desc(authenc);
@@ -817,9 +819,13 @@ static int rfc4106_setkey(struct crypto_aead *aead,
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *dev = ctx->dev;
+ int ret;
- if (keylen < 4)
- return -EINVAL;
+ ret = aes_check_keylen(keylen - 4);
+ if (ret) {
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return ret;
+ }
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
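
For rfc4106(gcm(aes)), and for rfc4543 below, the user key carries a 4-byte nonce/salt appended to the raw AES key, so the check becomes aes_check_keylen(keylen - 4) rather than the old bare keylen < 4 test (an undersized keylen still fails, since the unsigned subtraction can never yield a valid AES size). A hypothetical helper spelling out the split; the names are not from the driver:

#include <crypto/aes.h>
#include <linux/errno.h>

static int rfc4106_split_keylen_sketch(unsigned int keylen,
				       unsigned int *aes_keylen)
{
	if (keylen < 4)
		return -EINVAL;

	*aes_keylen = keylen - 4;	/* trailing 4 bytes are the salt */
	return aes_check_keylen(*aes_keylen);
}
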
@@ -900,6 +906,9 @@ static int rfc4543_setauthsize(struct crypto_aead *authenc,
{
struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ if (authsize != 16)
+ return -EINVAL;
+
ctx->authsize = authsize;
rfc4543_set_sh_desc(authenc);
@@ -911,9 +920,13 @@ static int rfc4543_setkey(struct crypto_aead *aead,
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *dev = ctx->dev;
+ int ret;
- if (keylen < 4)
- return -EINVAL;
+ ret = aes_check_keylen(keylen - 4);
+ if (ret) {
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return ret;
+ }
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -931,7 +944,7 @@ static int rfc4543_setkey(struct crypto_aead *aead,
}
static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
- unsigned int keylen)
+ unsigned int keylen, const u32 ctx1_iv_off)
{
struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct caam_skcipher_alg *alg =
@@ -941,34 +954,11 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
struct caam_flc *flc;
unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
u32 *desc;
- u32 ctx1_iv_off = 0;
- const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
- OP_ALG_AAI_CTR_MOD128) &&
- ((ctx->cdata.algtype & OP_ALG_ALGSEL_MASK) !=
- OP_ALG_ALGSEL_CHACHA20);
const bool is_rfc3686 = alg->caam.rfc3686;
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
- /*
- * AES-CTR needs to load IV in CONTEXT1 reg
- * at an offset of 128bits (16bytes)
- * CONTEXT1[255:128] = IV
- */
- if (ctr_mode)
- ctx1_iv_off = 16;
-
- /*
- * RFC3686 specific:
- * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
- * | *key = {KEY, NONCE}
- */
- if (is_rfc3686) {
- ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
- keylen -= CTR_RFC3686_NONCE_SIZE;
- }
-
ctx->cdata.keylen = keylen;
ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
@@ -996,11 +986,92 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
return 0;
}
+static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ int err;
+
+ err = aes_check_keylen(keylen);
+ if (err) {
+ crypto_skcipher_set_flags(skcipher,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return err;
+ }
+
+ return skcipher_setkey(skcipher, key, keylen, 0);
+}
+
+static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ u32 ctx1_iv_off;
+ int err;
+
+ /*
+ * RFC3686 specific:
+ * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+ * | *key = {KEY, NONCE}
+ */
+ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+ keylen -= CTR_RFC3686_NONCE_SIZE;
+
+ err = aes_check_keylen(keylen);
+ if (err) {
+ crypto_skcipher_set_flags(skcipher,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return err;
+ }
+
+ return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
+}
+
+static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ u32 ctx1_iv_off;
+ int err;
+
+ /*
+ * AES-CTR needs to load IV in CONTEXT1 reg
+ * at an offset of 128bits (16bytes)
+ * CONTEXT1[255:128] = IV
+ */
+ ctx1_iv_off = 16;
+
+ err = aes_check_keylen(keylen);
+ if (err) {
+ crypto_skcipher_set_flags(skcipher,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return err;
+ }
+
+ return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
+}
+
+static int chacha20_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ if (keylen != CHACHA_KEY_SIZE) {
+ crypto_skcipher_set_flags(skcipher,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ return skcipher_setkey(skcipher, key, keylen, 0);
+}
+
+static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ return verify_skcipher_des_key(skcipher, key) ?:
+ skcipher_setkey(skcipher, key, keylen, 0);
+}
+
static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
- const u8 *key, unsigned int keylen)
+ const u8 *key, unsigned int keylen)
{
- return unlikely(des3_verify_key(skcipher, key)) ?:
- skcipher_setkey(skcipher, key, keylen);
+ return verify_skcipher_des3_key(skcipher, key) ?:
+ skcipher_setkey(skcipher, key, keylen, 0);
}
static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
@@ -1227,10 +1298,8 @@ static void aead_encrypt_done(void *cbk_ctx, u32 status)
dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
- if (unlikely(status)) {
- caam_qi2_strstatus(ctx->dev, status);
- ecode = -EIO;
- }
+ if (unlikely(status))
+ ecode = caam_qi2_strstatus(ctx->dev, status);
aead_unmap(ctx->dev, edesc, req);
qi_cache_free(edesc);
@@ -1250,17 +1319,8 @@ static void aead_decrypt_done(void *cbk_ctx, u32 status)
dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
- if (unlikely(status)) {
- caam_qi2_strstatus(ctx->dev, status);
- /*
- * verify hw auth check passed else return -EBADMSG
- */
- if ((status & JRSTA_CCBERR_ERRID_MASK) ==
- JRSTA_CCBERR_ERRID_ICVCHK)
- ecode = -EBADMSG;
- else
- ecode = -EIO;
- }
+ if (unlikely(status))
+ ecode = caam_qi2_strstatus(ctx->dev, status);
aead_unmap(ctx->dev, edesc, req);
qi_cache_free(edesc);
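
The completion callbacks above (and the skcipher/ahash ones further down) now take their error code straight from caam_qi2_strstatus(). That relies on the helper, which lives in the driver's error-handling code outside this diff, being changed to translate the hardware status word into an errno rather than only printing it. A simplified sketch of the behaviour the callbacks assume, preserving the ICV-check special case that used to sit in aead_decrypt_done():

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/types.h>
#include "regs.h"

/* Illustrative only -- the real decoding/translation is not in this file. */
static int caam_qi2_strstatus_sketch(struct device *dev, u32 status)
{
	/* a real implementation would also log the decoded status here */
	if ((status & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
		return -EBADMSG;	/* authentication (ICV) failure */

	return -EIO;			/* any other CAAM error */
}
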
@@ -1325,18 +1385,12 @@ static int aead_decrypt(struct aead_request *req)
static int ipsec_gcm_encrypt(struct aead_request *req)
{
- if (req->assoclen < 8)
- return -EINVAL;
-
- return aead_encrypt(req);
+ return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_encrypt(req);
}
static int ipsec_gcm_decrypt(struct aead_request *req)
{
- if (req->assoclen < 8)
- return -EINVAL;
-
- return aead_decrypt(req);
+ return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_decrypt(req);
}
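
crypto_ipsec_check_assoclen() tightens the old assoclen < 8 test: for ESP offload the associated data is the SPI, a 32-bit sequence number or 64-bit ESN, and the 8-byte explicit IV, so only 16 or 20 bytes are acceptable. An equivalent-in-effect sketch, assuming the generic helper from <crypto/gcm.h>:

#include <linux/errno.h>

static int ipsec_check_assoclen_sketch(unsigned int assoclen)
{
	switch (assoclen) {
	case 16:	/* SPI + 32-bit sequence number + 8-byte IV */
	case 20:	/* SPI + 64-bit ESN + 8-byte IV */
		return 0;
	default:
		return -EINVAL;
	}
}
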
static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
@@ -1352,10 +1406,8 @@ static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
- if (unlikely(status)) {
- caam_qi2_strstatus(ctx->dev, status);
- ecode = -EIO;
- }
+ if (unlikely(status))
+ ecode = caam_qi2_strstatus(ctx->dev, status);
print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
@@ -1371,7 +1423,9 @@ static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
* ciphertext block (CBC mode) or last counter (CTR mode).
* This is used e.g. by the CTS mode.
*/
- memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize);
+ if (!ecode)
+ memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
+ ivsize);
qi_cache_free(edesc);
skcipher_request_complete(req, ecode);
@@ -1390,10 +1444,8 @@ static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
- if (unlikely(status)) {
- caam_qi2_strstatus(ctx->dev, status);
- ecode = -EIO;
- }
+ if (unlikely(status))
+ ecode = caam_qi2_strstatus(ctx->dev, status);
print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
@@ -1409,7 +1461,9 @@ static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
* ciphertext block (CBC mode) or last counter (CTR mode).
* This is used e.g. by the CTS mode.
*/
- memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize);
+ if (!ecode)
+ memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
+ ivsize);
qi_cache_free(edesc);
skcipher_request_complete(req, ecode);
@@ -1423,6 +1477,9 @@ static int skcipher_encrypt(struct skcipher_request *req)
struct caam_request *caam_req = skcipher_request_ctx(req);
int ret;
+ if (!req->cryptlen)
+ return 0;
+
/* allocate extended descriptor */
edesc = skcipher_edesc_alloc(req);
if (IS_ERR(edesc))
@@ -1451,6 +1508,8 @@ static int skcipher_decrypt(struct skcipher_request *req)
struct caam_request *caam_req = skcipher_request_ctx(req);
int ret;
+ if (!req->cryptlen)
+ return 0;
/* allocate extended descriptor */
edesc = skcipher_edesc_alloc(req);
if (IS_ERR(edesc))
@@ -1545,7 +1604,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.cra_driver_name = "cbc-aes-caam-qi2",
.cra_blocksize = AES_BLOCK_SIZE,
},
- .setkey = skcipher_setkey,
+ .setkey = aes_skcipher_setkey,
.encrypt = skcipher_encrypt,
.decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1577,7 +1636,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.cra_driver_name = "cbc-des-caam-qi2",
.cra_blocksize = DES_BLOCK_SIZE,
},
- .setkey = skcipher_setkey,
+ .setkey = des_skcipher_setkey,
.encrypt = skcipher_encrypt,
.decrypt = skcipher_decrypt,
.min_keysize = DES_KEY_SIZE,
@@ -1593,7 +1652,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.cra_driver_name = "ctr-aes-caam-qi2",
.cra_blocksize = 1,
},
- .setkey = skcipher_setkey,
+ .setkey = ctr_skcipher_setkey,
.encrypt = skcipher_encrypt,
.decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1611,7 +1670,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
.cra_blocksize = 1,
},
- .setkey = skcipher_setkey,
+ .setkey = rfc3686_skcipher_setkey,
.encrypt = skcipher_encrypt,
.decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE +
@@ -1650,7 +1709,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.cra_driver_name = "chacha20-caam-qi2",
.cra_blocksize = 1,
},
- .setkey = skcipher_setkey,
+ .setkey = chacha20_skcipher_setkey,
.encrypt = skcipher_encrypt,
.decrypt = skcipher_decrypt,
.min_keysize = CHACHA_KEY_SIZE,
@@ -2918,6 +2977,7 @@ enum hash_optype {
/**
* caam_hash_ctx - ahash per-session context
* @flc: Flow Contexts array
+ * @key: authentication key
* @flc_dma: I/O virtual addresses of the Flow Contexts
* @dev: dpseci device
* @ctx_len: size of Context Register
@@ -2925,6 +2985,7 @@ enum hash_optype {
*/
struct caam_hash_ctx {
struct caam_flc flc[HASH_NUM_OP];
+ u8 key[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
dma_addr_t flc_dma[HASH_NUM_OP];
struct device *dev;
int ctx_len;
@@ -3094,10 +3155,7 @@ static void split_key_sh_done(void *cbk_ctx, u32 err)
dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
- if (err)
- caam_qi2_strstatus(res->dev, err);
-
- res->err = err;
+ res->err = err ? caam_qi2_strstatus(res->dev, err) : 0;
complete(&res->completion);
}
@@ -3228,6 +3286,19 @@ static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
ctx->adata.key_virt = key;
ctx->adata.key_inline = true;
+ /*
+ * In case |user key| > |derived key|, using DKP<imm,imm> would result
+ * in invalid opcodes (last bytes of user key) in the resulting
+ * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
+ * addresses are needed.
+ */
+ if (keylen > ctx->adata.keylen_pad) {
+ memcpy(ctx->key, key, keylen);
+ dma_sync_single_for_device(ctx->dev, ctx->adata.key_dma,
+ ctx->adata.keylen_pad,
+ DMA_TO_DEVICE);
+ }
+
ret = ahash_set_sh_desc(ahash);
kfree(hashed_key);
return ret;
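
The copy-and-sync above pairs with the mapping added in caam_hash_cra_init() further down: the per-context key buffer is DMA-mapped once at tfm init time with DMA_ATTR_SKIP_CPU_SYNC, and each setkey() that stores a long user key only needs a dma_sync_single_for_device() to publish it to the accelerator. A minimal sketch of that map-once/sync-per-update pattern, with hypothetical helper names:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

static int key_buf_map_sketch(struct device *dev, u8 *buf, size_t len,
			      dma_addr_t *dma)
{
	/* map once at init time; nothing useful to sync yet */
	*dma = dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE,
				    DMA_ATTR_SKIP_CPU_SYNC);
	return dma_mapping_error(dev, *dma) ? -ENOMEM : 0;
}

static void key_buf_publish_sketch(struct device *dev, dma_addr_t dma,
				   u8 *buf, const u8 *key, size_t len)
{
	memcpy(buf, key, len);			/* CPU-side update */
	dma_sync_single_for_device(dev, dma, len, DMA_TO_DEVICE);
}
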
@@ -3282,10 +3353,8 @@ static void ahash_done(void *cbk_ctx, u32 status)
dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
- if (unlikely(status)) {
- caam_qi2_strstatus(ctx->dev, status);
- ecode = -EIO;
- }
+ if (unlikely(status))
+ ecode = caam_qi2_strstatus(ctx->dev, status);
ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
memcpy(req->result, state->caam_ctx, digestsize);
@@ -3310,10 +3379,8 @@ static void ahash_done_bi(void *cbk_ctx, u32 status)
dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
- if (unlikely(status)) {
- caam_qi2_strstatus(ctx->dev, status);
- ecode = -EIO;
- }
+ if (unlikely(status))
+ ecode = caam_qi2_strstatus(ctx->dev, status);
ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
switch_buf(state);
@@ -3343,10 +3410,8 @@ static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
- if (unlikely(status)) {
- caam_qi2_strstatus(ctx->dev, status);
- ecode = -EIO;
- }
+ if (unlikely(status))
+ ecode = caam_qi2_strstatus(ctx->dev, status);
ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
memcpy(req->result, state->caam_ctx, digestsize);
@@ -3371,10 +3436,8 @@ static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
- if (unlikely(status)) {
- caam_qi2_strstatus(ctx->dev, status);
- ecode = -EIO;
- }
+ if (unlikely(status))
+ ecode = caam_qi2_strstatus(ctx->dev, status);
ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
switch_buf(state);
@@ -4466,11 +4529,27 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
ctx->dev = caam_hash->dev;
+ if (alg->setkey) {
+ ctx->adata.key_dma = dma_map_single_attrs(ctx->dev, ctx->key,
+ ARRAY_SIZE(ctx->key),
+ DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(ctx->dev, ctx->adata.key_dma)) {
+ dev_err(ctx->dev, "unable to map key\n");
+ return -ENOMEM;
+ }
+ }
+
dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
DMA_BIDIRECTIONAL,
DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(ctx->dev, dma_addr)) {
dev_err(ctx->dev, "unable to map shared descriptors\n");
+ if (ctx->adata.key_dma)
+ dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
+ ARRAY_SIZE(ctx->key),
+ DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
return -ENOMEM;
}
@@ -4496,6 +4575,10 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
+ if (ctx->adata.key_dma)
+ dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
+ ARRAY_SIZE(ctx->key), DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
}
static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
@@ -4700,7 +4783,7 @@ static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
if (unlikely(fd_err))
- dev_err(priv->dev, "FD error: %08x\n", fd_err);
+ dev_err_ratelimited(priv->dev, "FD error: %08x\n", fd_err);
/*
* FD[ADDR] is guaranteed to be valid, irrespective of errors reported
@@ -5098,6 +5181,8 @@ static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
goto err_bind;
}
+ dpaa2_dpseci_debugfs_init(priv);
+
/* register crypto algorithms the device supports */
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
struct caam_skcipher_alg *t_alg = driver_algs + i;
@@ -5265,6 +5350,8 @@ static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
dev = &ls_dev->dev;
priv = dev_get_drvdata(dev);
+ dpaa2_dpseci_debugfs_exit(priv);
+
for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
struct caam_aead_alg *t_alg = driver_aeads + i;