author		Thara Gopinath <thara.gopinath@linaro.org>	2021-04-29 18:07:06 +0300
committer	Herbert Xu <herbert@gondor.apana.org.au>	2021-05-14 14:07:56 +0300
commit		db0018a8b615e256c90a63d2d5698f2144dde222 (patch)
tree		9d667b24c15f095cc003f2bc8a99e00c53b619a7 /drivers/crypto/qce
parent		e5d6181d35b257c13841f774f5ad36b0cb2d82aa (diff)
download	linux-db0018a8b615e256c90a63d2d5698f2144dde222.tar.xz
crypto: qce - Add support for AEAD algorithms
Add register programming sequence for enabling AEAD algorithms on the
Qualcomm crypto engine.

Signed-off-by: Thara Gopinath <thara.gopinath@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
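The sequence programmed by the new qce_setup_regs_aead() is: write the
encryption key and IV (plus, for CCM, the initial counter and counter
masks), clear and then write the authentication key and either the
standard HMAC IV or the CCM nonce, configure the encryption and
authentication segments and sizes, program CONFIG, and kick off the
operation. The new path is reached through the existing qce_start()
dispatcher; as a minimal sketch (hypothetical caller context, for
illustration only):

	/* e.g. from the driver's AEAD request handler (hypothetical) */
	ret = qce_start(&req->base, CRYPTO_ALG_TYPE_AEAD);

where req is the struct aead_request being processed and &req->base is
its embedded struct crypto_async_request.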
Diffstat (limited to 'drivers/crypto/qce')
-rw-r--r--	drivers/crypto/qce/common.c	162
1 file changed, 160 insertions(+), 2 deletions(-)
diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index 7b3d6caec1b2..7c612ba5068f 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -15,6 +15,7 @@
#include "core.h"
#include "regs-v5.h"
#include "sha.h"
+#include "aead.h"
static inline u32 qce_read(struct qce_device *qce, u32 offset)
{
@@ -96,7 +97,7 @@ static inline void qce_crypto_go(struct qce_device *qce, bool result_dump)
qce_write(qce, REG_GOPROC, BIT(GO_SHIFT));
}
-#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
+#if defined(CONFIG_CRYPTO_DEV_QCE_SHA) || defined(CONFIG_CRYPTO_DEV_QCE_AEAD)
static u32 qce_auth_cfg(unsigned long flags, u32 key_size, u32 auth_size)
{
u32 cfg = 0;
@@ -139,7 +140,9 @@ static u32 qce_auth_cfg(unsigned long flags, u32 key_size, u32 auth_size)
return cfg;
}
+#endif
+#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
static int qce_setup_regs_ahash(struct crypto_async_request *async_req)
{
struct ahash_request *req = ahash_request_cast(async_req);
@@ -225,7 +228,7 @@ go_proc:
}
#endif
-#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
+#if defined(CONFIG_CRYPTO_DEV_QCE_SKCIPHER) || defined(CONFIG_CRYPTO_DEV_QCE_AEAD)
static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
{
u32 cfg = 0;
@@ -271,7 +274,9 @@ static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
return cfg;
}
+#endif
+#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
{
u8 swap[QCE_AES_IV_LENGTH];
@@ -386,6 +391,155 @@ static int qce_setup_regs_skcipher(struct crypto_async_request *async_req)
}
#endif
+#ifdef CONFIG_CRYPTO_DEV_QCE_AEAD
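+/*
+ * Standard initial hash values, programmed as the initial authentication
+ * IV for the SHA-1/SHA-256 HMAC algorithms. Both arrays are sized for
+ * SHA-256 (the larger digest); std_iv_sha1 is zero-padded.
+ */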
+static const u32 std_iv_sha1[SHA256_DIGEST_SIZE / sizeof(u32)] = {
+ SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, 0, 0, 0
+};
+
+static const u32 std_iv_sha256[SHA256_DIGEST_SIZE / sizeof(u32)] = {
+ SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
+ SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7
+};
+
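+/*
+ * Copy @len bytes of big-endian data into @dst as CPU-order 32-bit words.
+ * Only complete words are converted in the loop; the return value is the
+ * word count rounded up, e.g. a 16-byte AES-128 key yields 4 words.
+ */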
+static unsigned int qce_be32_to_cpu_array(u32 *dst, const u8 *src, unsigned int len)
+{
+ u32 *d = dst;
+ const u8 *s = src;
+ unsigned int n;
+
+ n = len / sizeof(u32);
+ for (; n > 0; n--) {
+ *d = be32_to_cpup((const __be32 *)s);
+ s += sizeof(u32);
+ d++;
+ }
+ return DIV_ROUND_UP(len, sizeof(u32));
+}
+
+static int qce_setup_regs_aead(struct crypto_async_request *async_req)
+{
+ struct aead_request *req = aead_request_cast(async_req);
+ struct qce_aead_reqctx *rctx = aead_request_ctx(req);
+ struct qce_aead_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
+ struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
+ struct qce_device *qce = tmpl->qce;
+ u32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
+ u32 enciv[QCE_MAX_IV_SIZE / sizeof(u32)] = {0};
+ u32 authkey[QCE_SHA_HMAC_KEY_SIZE / sizeof(u32)] = {0};
+ u32 authiv[SHA256_DIGEST_SIZE / sizeof(u32)] = {0};
+ u32 authnonce[QCE_MAX_NONCE / sizeof(u32)] = {0};
+ unsigned int enc_keylen = ctx->enc_keylen;
+ unsigned int auth_keylen = ctx->auth_keylen;
+ unsigned int enc_ivsize = rctx->ivsize;
+ unsigned int auth_ivsize = 0;
+ unsigned int enckey_words, enciv_words;
+ unsigned int authkey_words, authiv_words, authnonce_words;
+ unsigned long flags = rctx->flags;
+ u32 encr_cfg, auth_cfg, config, totallen;
+ u32 iv_last_word;
+
+ qce_setup_config(qce);
+
+ /* Write encryption key */
+ enckey_words = qce_be32_to_cpu_array(enckey, ctx->enc_key, enc_keylen);
+ qce_write_array(qce, REG_ENCR_KEY0, enckey, enckey_words);
+
+ /* Write encryption iv */
+ enciv_words = qce_be32_to_cpu_array(enciv, rctx->iv, enc_ivsize);
+ qce_write_array(qce, REG_CNTR0_IV0, enciv, enciv_words);
+
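+ /*
+  * For CCM, program the payload counter as the last IV word plus one,
+  * keep the unincremented counter in the CCM initial counter registers,
+  * and enable all counter mask bits.
+  */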
+ if (IS_CCM(rctx->flags)) {
+ iv_last_word = enciv[enciv_words - 1];
+ qce_write(qce, REG_CNTR3_IV3, iv_last_word + 1);
+ qce_write_array(qce, REG_ENCR_CCM_INT_CNTR0, (u32 *)enciv, enciv_words);
+ qce_write(qce, REG_CNTR_MASK, ~0);
+ qce_write(qce, REG_CNTR_MASK0, ~0);
+ qce_write(qce, REG_CNTR_MASK1, ~0);
+ qce_write(qce, REG_CNTR_MASK2, ~0);
+ }
+
+ /* Clear authentication IV and KEY registers of previous values */
+ qce_clear_array(qce, REG_AUTH_IV0, 16);
+ qce_clear_array(qce, REG_AUTH_KEY0, 16);
+
+ /* Clear byte count */
+ qce_clear_array(qce, REG_AUTH_BYTECNT0, 4);
+
+ /* Write authentication key */
+ authkey_words = qce_be32_to_cpu_array(authkey, ctx->auth_key, auth_keylen);
+ qce_write_array(qce, REG_AUTH_KEY0, (u32 *)authkey, authkey_words);
+
+ /* Write initial authentication IV only for HMAC algorithms */
+ if (IS_SHA_HMAC(rctx->flags)) {
+ /* Write default authentication iv */
+ if (IS_SHA1_HMAC(rctx->flags)) {
+ auth_ivsize = SHA1_DIGEST_SIZE;
+ memcpy(authiv, std_iv_sha1, auth_ivsize);
+ } else if (IS_SHA256_HMAC(rctx->flags)) {
+ auth_ivsize = SHA256_DIGEST_SIZE;
+ memcpy(authiv, std_iv_sha256, auth_ivsize);
+ }
+ authiv_words = auth_ivsize / sizeof(u32);
+ qce_write_array(qce, REG_AUTH_IV0, (u32 *)authiv, authiv_words);
+ } else if (IS_CCM(rctx->flags)) {
+ /* Write nonce for CCM algorithms */
+ authnonce_words = qce_be32_to_cpu_array(authnonce, rctx->ccm_nonce, QCE_MAX_NONCE);
+ qce_write_array(qce, REG_AUTH_INFO_NONCE0, authnonce, authnonce_words);
+ }
+
+ /* Set up ENCR_SEG_CFG */
+ encr_cfg = qce_encr_cfg(flags, enc_keylen);
+ if (IS_ENCRYPT(flags))
+ encr_cfg |= BIT(ENCODE_SHIFT);
+ qce_write(qce, REG_ENCR_SEG_CFG, encr_cfg);
+
+ /* Set up AUTH_SEG_CFG */
+ auth_cfg = qce_auth_cfg(rctx->flags, auth_keylen, ctx->authsize);
+ auth_cfg |= BIT(AUTH_LAST_SHIFT);
+ auth_cfg |= BIT(AUTH_FIRST_SHIFT);
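+ /*
+  * CCM authenticates the plaintext (auth before cipher on encryption),
+  * while the HMAC algorithms authenticate the ciphertext (encrypt then
+  * MAC), so the auth position flips between directions.
+  */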
+ if (IS_ENCRYPT(flags)) {
+ if (IS_CCM(rctx->flags))
+ auth_cfg |= AUTH_POS_BEFORE << AUTH_POS_SHIFT;
+ else
+ auth_cfg |= AUTH_POS_AFTER << AUTH_POS_SHIFT;
+ } else {
+ if (IS_CCM(rctx->flags))
+ auth_cfg |= AUTH_POS_AFTER << AUTH_POS_SHIFT;
+ else
+ auth_cfg |= AUTH_POS_BEFORE << AUTH_POS_SHIFT;
+ }
+ qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg);
+
+ totallen = rctx->cryptlen + rctx->assoclen;
+
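+ /*
+  * On CCM decryption the authentication tag is part of the input, so
+  * the encryption segment and the total segment size below must cover
+  * cryptlen plus the tag length.
+  */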
+ /* Set the encryption size and start offset */
+ if (IS_CCM(rctx->flags) && IS_DECRYPT(rctx->flags))
+ qce_write(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen + ctx->authsize);
+ else
+ qce_write(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen);
+ qce_write(qce, REG_ENCR_SEG_START, rctx->assoclen & 0xffff);
+
+ /* Set the authentication size and start offset */
+ qce_write(qce, REG_AUTH_SEG_SIZE, totallen);
+ qce_write(qce, REG_AUTH_SEG_START, 0);
+
+ /* Write total length */
+ if (IS_CCM(rctx->flags) && IS_DECRYPT(rctx->flags))
+ qce_write(qce, REG_SEG_SIZE, totallen + ctx->authsize);
+ else
+ qce_write(qce, REG_SEG_SIZE, totallen);
+
+ /* Get the CONFIG value for little-endian mode and program it */
+ config = qce_config_reg(qce, 1);
+ qce_write(qce, REG_CONFIG, config);
+
+ /* Start the process; the result dump is not used for CCM */
+ qce_crypto_go(qce, !IS_CCM(flags));
+
+ return 0;
+}
+#endif
+
int qce_start(struct crypto_async_request *async_req, u32 type)
{
switch (type) {
@@ -397,6 +551,10 @@ int qce_start(struct crypto_async_request *async_req, u32 type)
case CRYPTO_ALG_TYPE_AHASH:
return qce_setup_regs_ahash(async_req);
#endif
+#ifdef CONFIG_CRYPTO_DEV_QCE_AEAD
+ case CRYPTO_ALG_TYPE_AEAD:
+ return qce_setup_regs_aead(async_req);
+#endif
default:
return -EINVAL;
}