author    Linus Torvalds <torvalds@linux-foundation.org>  2018-06-06 01:51:21 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-06-06 01:51:21 +0300
commit    3e1a29b3bf66c2850ea8eba78c59c234921c0b69 (patch)
tree      641a5428e3a1ef205fafede3d6a03dae85d30e92 /drivers
parent    fd59ccc53062964007beda8787ffd9cd93968d63 (diff)
parent    b268b3506d9910ca8238e92cb1dc51340574b2f2 (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "API:
   - Decryption test vectors are now automatically generated from
     encryption test vectors.

  Algorithms:
   - Fix unaligned access issues in crc32/crc32c.
   - Add zstd compression algorithm.
   - Add AEGIS.
   - Add MORUS.

  Drivers:
   - Add accelerated AEGIS/MORUS on x86.
   - Add accelerated SM4 on arm64.
   - Removed x86 assembly salsa implementation as it is slower than C.
   - Add authenc(hmac(sha*), cbc(aes)) support in inside-secure.
   - Add ctr(aes) support in crypto4xx.
   - Add hardware key support in ccree.
   - Add support for new Centaur CPU in via-rng"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (112 commits)
  crypto: chtls - free beyond end rspq_skb_cache
  crypto: chtls - kbuild warnings
  crypto: chtls - dereference null variable
  crypto: chtls - wait for memory sendmsg, sendpage
  crypto: chtls - key len correction
  crypto: salsa20 - Revert "crypto: salsa20 - export generic helpers"
  crypto: x86/salsa20 - remove x86 salsa20 implementations
  crypto: ccp - Add GET_ID SEV command
  crypto: ccp - Add DOWNLOAD_FIRMWARE SEV command
  crypto: qat - Add MODULE_FIRMWARE for all qat drivers
  crypto: ccree - silence debug prints
  crypto: ccree - better clock handling
  crypto: ccree - correct host regs offset
  crypto: chelsio - Remove separate buffer used for DMA map B0 block in CCM
  crypt: chelsio - Send IV as Immediate for cipher algo
  crypto: chelsio - Return -ENOSPC for transient busy indication.
  crypto: caam/qi - fix warning in init_cgr()
  crypto: caam - fix rfc4543 descriptors
  crypto: caam - fix MC firmware detection
  crypto: clarify licensing of OpenSSL asm code
  ...
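For context, a minimal and purely hypothetical kernel-side sketch of how the newly accelerated "ctr(aes)" (which the crypto4xx changes in this merge register as ctr-aes-ppc4xx) is reached through the generic skcipher API; the function name ctr_aes_demo and the zeroed key/IV/data are illustrative assumptions, not code from this merge.

#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/*
 * Request "ctr(aes)" by name; if a hardware driver registers it with a
 * higher priority, it is selected transparently, otherwise the generic
 * software implementation serves the request.
 */
static int ctr_aes_demo(void)
{
	static const u8 key[16];		/* all-zero demo key */
	u8 iv[16] = { 0 };			/* demo counter block */
	struct crypto_skcipher *tfm;
	struct skcipher_request *req = NULL;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	u8 *buf;
	int err;

	tfm = crypto_alloc_skcipher("ctr(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	buf = kzalloc(64, GFP_KERNEL);		/* data must be DMA-able */
	if (!buf) {
		err = -ENOMEM;
		goto out;
	}

	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out;
	}

	sg_init_one(&sg, buf, 64);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, 64, iv);

	/* Handles both synchronous and asynchronous (hardware) completion. */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
out:
	skcipher_request_free(req);
	kfree(buf);
	crypto_free_skcipher(tfm);
	return err;
}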
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/char/hw_random/Kconfig                 |   1
-rw-r--r--  drivers/char/hw_random/n2-drv.c                |   2
-rw-r--r--  drivers/char/hw_random/stm32-rng.c             |   9
-rw-r--r--  drivers/char/hw_random/via-rng.c               |   2
-rw-r--r--  drivers/crypto/Kconfig                         |  16
-rw-r--r--  drivers/crypto/amcc/crypto4xx_alg.c            | 231
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.c           | 317
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.h           |  35
-rw-r--r--  drivers/crypto/caam/caamalg.c                  | 231
-rw-r--r--  drivers/crypto/caam/caamalg_desc.c             |   4
-rw-r--r--  drivers/crypto/caam/caamalg_qi.c               | 227
-rw-r--r--  drivers/crypto/caam/caampkc.c                  |  71
-rw-r--r--  drivers/crypto/caam/caampkc.h                  |   8
-rw-r--r--  drivers/crypto/caam/ctrl.c                     |  78
-rw-r--r--  drivers/crypto/caam/ctrl.h                     |   2
-rw-r--r--  drivers/crypto/caam/intern.h                   |   1
-rw-r--r--  drivers/crypto/caam/qi.c                       |   5
-rw-r--r--  drivers/crypto/caam/regs.h                     |   6
-rw-r--r--  drivers/crypto/cavium/zip/common.h             |  21
-rw-r--r--  drivers/crypto/cavium/zip/zip_crypto.c         |  22
-rw-r--r--  drivers/crypto/cavium/zip/zip_deflate.c        |   4
-rw-r--r--  drivers/crypto/cavium/zip/zip_device.c         |   4
-rw-r--r--  drivers/crypto/cavium/zip/zip_inflate.c        |   4
-rw-r--r--  drivers/crypto/cavium/zip/zip_main.c           |  24
-rw-r--r--  drivers/crypto/cavium/zip/zip_main.h           |   1
-rw-r--r--  drivers/crypto/cavium/zip/zip_regs.h           |  42
-rw-r--r--  drivers/crypto/ccp/psp-dev.c                   | 143
-rw-r--r--  drivers/crypto/ccp/psp-dev.h                   |   4
-rw-r--r--  drivers/crypto/ccree/cc_cipher.c               | 350
-rw-r--r--  drivers/crypto/ccree/cc_cipher.h               |  30
-rw-r--r--  drivers/crypto/ccree/cc_debugfs.c              |   7
-rw-r--r--  drivers/crypto/ccree/cc_driver.c               |  36
-rw-r--r--  drivers/crypto/ccree/cc_driver.h               |   2
-rw-r--r--  drivers/crypto/ccree/cc_host_regs.h            |   6
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.c             | 303
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.h             |   3
-rw-r--r--  drivers/crypto/chelsio/chcr_core.h             |   2
-rw-r--r--  drivers/crypto/chelsio/chcr_crypto.h           |  15
-rw-r--r--  drivers/crypto/chelsio/chcr_ipsec.c            |  35
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls.h           |  11
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls_cm.c        |  28
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls_hw.c        |   6
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls_io.c        | 158
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls_main.c      |  16
-rw-r--r--  drivers/crypto/exynos-rng.c                    |   6
-rw-r--r--  drivers/crypto/inside-secure/safexcel.c        |  32
-rw-r--r--  drivers/crypto/inside-secure/safexcel.h        |  44
-rw-r--r--  drivers/crypto/inside-secure/safexcel_cipher.c | 642
-rw-r--r--  drivers/crypto/inside-secure/safexcel_hash.c   |  23
-rw-r--r--  drivers/crypto/nx/nx-842-powernv.c             |   2
-rw-r--r--  drivers/crypto/omap-sham.c                     |   2
-rw-r--r--  drivers/crypto/picoxcell_crypto.c              |   6
-rw-r--r--  drivers/crypto/qat/qat_c3xxx/adf_drv.c         |   2
-rw-r--r--  drivers/crypto/qat/qat_c62x/adf_drv.c          |   2
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_drv.c      |   1
-rw-r--r--  drivers/crypto/vmx/aes.c                       |   2
-rw-r--r--  drivers/crypto/vmx/aes_cbc.c                   |   3
-rw-r--r--  drivers/crypto/vmx/aes_ctr.c                   |   2
-rw-r--r--  drivers/crypto/vmx/aes_xts.c                   |   2
-rw-r--r--  drivers/crypto/vmx/aesp8-ppc.pl                |  49
-rw-r--r--  drivers/crypto/vmx/ghash.c                     |   2
-rw-r--r--  drivers/crypto/vmx/ghashp8-ppc.pl              |   9
62 files changed, 2265 insertions, 1089 deletions
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index d53541e96bee..c34b257d852d 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -347,6 +347,7 @@ config HW_RANDOM_STM32
tristate "STMicroelectronics STM32 random number generator"
depends on HW_RANDOM && (ARCH_STM32 || COMPILE_TEST)
depends on HAS_IOMEM
+ default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
Generator hardware found on STM32 microcontrollers.
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c
index 92dd4e925315..f8411515fe1c 100644
--- a/drivers/char/hw_random/n2-drv.c
+++ b/drivers/char/hw_random/n2-drv.c
@@ -435,7 +435,7 @@ static int n2rng_data_read(struct hwrng *rng, u32 *data)
*data = np->test_data & 0xffffffff;
len = 4;
} else {
- dev_err(&np->op->dev, "RNG error, restesting\n");
+ dev_err(&np->op->dev, "RNG error, retesting\n");
np->flags &= ~N2RNG_FLAG_READY;
if (!(np->flags & N2RNG_FLAG_SHUTDOWN))
schedule_delayed_work(&np->work, 0);
diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c
index 0d2328da3b76..042860d97b15 100644
--- a/drivers/char/hw_random/stm32-rng.c
+++ b/drivers/char/hw_random/stm32-rng.c
@@ -187,8 +187,13 @@ static int stm32_rng_runtime_resume(struct device *dev)
}
#endif
-static UNIVERSAL_DEV_PM_OPS(stm32_rng_pm_ops, stm32_rng_runtime_suspend,
- stm32_rng_runtime_resume, NULL);
+static const struct dev_pm_ops stm32_rng_pm_ops = {
+ SET_RUNTIME_PM_OPS(stm32_rng_runtime_suspend,
+ stm32_rng_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
static const struct of_device_id stm32_rng_match[] = {
{
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index 6e9df558325b..ffe9b0c6c647 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -135,7 +135,7 @@ static int via_rng_init(struct hwrng *rng)
* is always enabled if CPUID rng_en is set. There is no
* RNG configuration like it used to be the case in this
* register */
- if ((c->x86 == 6) && (c->x86_model >= 0x0f)) {
+ if (((c->x86 == 6) && (c->x86_model >= 0x0f)) || (c->x86 > 6)){
if (!boot_cpu_has(X86_FEATURE_XSTORE_EN)) {
pr_err(PFX "can't enable hardware RNG "
"if XSTORE is not enabled\n");
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index d1ea1a07cecb..43cccf6aff61 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -302,6 +302,7 @@ config CRYPTO_DEV_PPC4XX
select CRYPTO_AEAD
select CRYPTO_AES
select CRYPTO_CCM
+ select CRYPTO_CTR
select CRYPTO_GCM
select CRYPTO_BLKCIPHER
help
@@ -419,7 +420,7 @@ config CRYPTO_DEV_EXYNOS_RNG
config CRYPTO_DEV_S5P
tristate "Support for Samsung S5PV210/Exynos crypto accelerator"
depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
- depends on HAS_IOMEM && HAS_DMA
+ depends on HAS_IOMEM
select CRYPTO_AES
select CRYPTO_BLKCIPHER
help
@@ -466,7 +467,6 @@ endif # if CRYPTO_DEV_UX500
config CRYPTO_DEV_ATMEL_AUTHENC
tristate "Support for Atmel IPSEC/SSL hw accelerator"
- depends on HAS_DMA
depends on ARCH_AT91 || COMPILE_TEST
select CRYPTO_AUTHENC
select CRYPTO_DEV_ATMEL_AES
@@ -479,7 +479,6 @@ config CRYPTO_DEV_ATMEL_AUTHENC
config CRYPTO_DEV_ATMEL_AES
tristate "Support for Atmel AES hw accelerator"
- depends on HAS_DMA
depends on ARCH_AT91 || COMPILE_TEST
select CRYPTO_AES
select CRYPTO_AEAD
@@ -494,7 +493,6 @@ config CRYPTO_DEV_ATMEL_AES
config CRYPTO_DEV_ATMEL_TDES
tristate "Support for Atmel DES/TDES hw accelerator"
- depends on HAS_DMA
depends on ARCH_AT91 || COMPILE_TEST
select CRYPTO_DES
select CRYPTO_BLKCIPHER
@@ -508,7 +506,6 @@ config CRYPTO_DEV_ATMEL_TDES
config CRYPTO_DEV_ATMEL_SHA
tristate "Support for Atmel SHA hw accelerator"
- depends on HAS_DMA
depends on ARCH_AT91 || COMPILE_TEST
select CRYPTO_HASH
help
@@ -574,7 +571,8 @@ config CRYPTO_DEV_CAVIUM_ZIP
config CRYPTO_DEV_QCE
tristate "Qualcomm crypto engine accelerator"
- depends on (ARCH_QCOM || COMPILE_TEST) && HAS_DMA && HAS_IOMEM
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on HAS_IOMEM
select CRYPTO_AES
select CRYPTO_DES
select CRYPTO_ECB
@@ -598,7 +596,6 @@ source "drivers/crypto/vmx/Kconfig"
config CRYPTO_DEV_IMGTEC_HASH
tristate "Imagination Technologies hardware hash accelerator"
depends on MIPS || COMPILE_TEST
- depends on HAS_DMA
select CRYPTO_MD5
select CRYPTO_SHA1
select CRYPTO_SHA256
@@ -650,7 +647,6 @@ config CRYPTO_DEV_ROCKCHIP
config CRYPTO_DEV_MEDIATEK
tristate "MediaTek's EIP97 Cryptographic Engine driver"
- depends on HAS_DMA
depends on (ARM && ARCH_MEDIATEK) || COMPILE_TEST
select CRYPTO_AES
select CRYPTO_AEAD
@@ -688,9 +684,10 @@ source "drivers/crypto/stm32/Kconfig"
config CRYPTO_DEV_SAFEXCEL
tristate "Inside Secure's SafeXcel cryptographic engine driver"
- depends on HAS_DMA && OF
+ depends on OF
depends on (ARM64 && ARCH_MVEBU) || (COMPILE_TEST && 64BIT)
select CRYPTO_AES
+ select CRYPTO_AUTHENC
select CRYPTO_BLKCIPHER
select CRYPTO_HASH
select CRYPTO_HMAC
@@ -706,7 +703,6 @@ config CRYPTO_DEV_SAFEXCEL
config CRYPTO_DEV_ARTPEC6
tristate "Support for Axis ARTPEC-6/7 hardware crypto acceleration."
depends on ARM && (ARCH_ARTPEC || COMPILE_TEST)
- depends on HAS_DMA
depends on OF
select CRYPTO_AEAD
select CRYPTO_AES
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index ea83d0bff0e9..f5c07498ea4f 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -31,6 +31,7 @@
#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/ctr.h>
+#include <crypto/skcipher.h>
#include "crypto4xx_reg_def.h"
#include "crypto4xx_core.h"
#include "crypto4xx_sa.h"
@@ -74,51 +75,57 @@ static void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm,
sa->sa_command_1.bf.copy_hdr = cp_hdr;
}
-int crypto4xx_encrypt(struct ablkcipher_request *req)
+static inline int crypto4xx_crypt(struct skcipher_request *req,
+ const unsigned int ivlen, bool decrypt)
{
- struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- unsigned int ivlen = crypto_ablkcipher_ivsize(
- crypto_ablkcipher_reqtfm(req));
- __le32 iv[ivlen];
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
+ __le32 iv[AES_IV_SIZE];
if (ivlen)
- crypto4xx_memcpy_to_le32(iv, req->info, ivlen);
+ crypto4xx_memcpy_to_le32(iv, req->iv, ivlen);
return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
- req->nbytes, iv, ivlen, ctx->sa_out, ctx->sa_len, 0);
+ req->cryptlen, iv, ivlen, decrypt ? ctx->sa_in : ctx->sa_out,
+ ctx->sa_len, 0, NULL);
}
-int crypto4xx_decrypt(struct ablkcipher_request *req)
+int crypto4xx_encrypt_noiv(struct skcipher_request *req)
{
- struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- unsigned int ivlen = crypto_ablkcipher_ivsize(
- crypto_ablkcipher_reqtfm(req));
- __le32 iv[ivlen];
+ return crypto4xx_crypt(req, 0, false);
+}
- if (ivlen)
- crypto4xx_memcpy_to_le32(iv, req->info, ivlen);
+int crypto4xx_encrypt_iv(struct skcipher_request *req)
+{
+ return crypto4xx_crypt(req, AES_IV_SIZE, false);
+}
- return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
- req->nbytes, iv, ivlen, ctx->sa_in, ctx->sa_len, 0);
+int crypto4xx_decrypt_noiv(struct skcipher_request *req)
+{
+ return crypto4xx_crypt(req, 0, true);
+}
+
+int crypto4xx_decrypt_iv(struct skcipher_request *req)
+{
+ return crypto4xx_crypt(req, AES_IV_SIZE, true);
}
/**
* AES Functions
*/
-static int crypto4xx_setkey_aes(struct crypto_ablkcipher *cipher,
+static int crypto4xx_setkey_aes(struct crypto_skcipher *cipher,
const u8 *key,
unsigned int keylen,
unsigned char cm,
u8 fb)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
struct dynamic_sa_ctl *sa;
int rc;
if (keylen != AES_KEYSIZE_256 &&
keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_128) {
- crypto_ablkcipher_set_flags(cipher,
+ crypto_skcipher_set_flags(cipher,
CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -134,7 +141,8 @@ static int crypto4xx_setkey_aes(struct crypto_ablkcipher *cipher,
/* Setup SA */
sa = ctx->sa_in;
- set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV,
+ set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, (cm == CRYPTO_MODE_CBC ?
+ SA_SAVE_IV : SA_NOT_SAVE_IV),
SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
SA_NO_HEADER_PROC, SA_HASH_ALG_NULL,
SA_CIPHER_ALG_AES, SA_PAD_TYPE_ZERO,
@@ -158,39 +166,38 @@ static int crypto4xx_setkey_aes(struct crypto_ablkcipher *cipher,
return 0;
}
-int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher,
+int crypto4xx_setkey_aes_cbc(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_CBC,
CRYPTO_FEEDBACK_MODE_NO_FB);
}
-int crypto4xx_setkey_aes_cfb(struct crypto_ablkcipher *cipher,
+int crypto4xx_setkey_aes_cfb(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_CFB,
CRYPTO_FEEDBACK_MODE_128BIT_CFB);
}
-int crypto4xx_setkey_aes_ecb(struct crypto_ablkcipher *cipher,
+int crypto4xx_setkey_aes_ecb(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_ECB,
CRYPTO_FEEDBACK_MODE_NO_FB);
}
-int crypto4xx_setkey_aes_ofb(struct crypto_ablkcipher *cipher,
+int crypto4xx_setkey_aes_ofb(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_OFB,
CRYPTO_FEEDBACK_MODE_64BIT_OFB);
}
-int crypto4xx_setkey_rfc3686(struct crypto_ablkcipher *cipher,
+int crypto4xx_setkey_rfc3686(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
int rc;
rc = crypto4xx_setkey_aes(cipher, key, keylen - CTR_RFC3686_NONCE_SIZE,
@@ -204,35 +211,117 @@ int crypto4xx_setkey_rfc3686(struct crypto_ablkcipher *cipher,
return 0;
}
-int crypto4xx_rfc3686_encrypt(struct ablkcipher_request *req)
+int crypto4xx_rfc3686_encrypt(struct skcipher_request *req)
{
- struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
__le32 iv[AES_IV_SIZE / 4] = {
ctx->iv_nonce,
- cpu_to_le32p((u32 *) req->info),
- cpu_to_le32p((u32 *) (req->info + 4)),
+ cpu_to_le32p((u32 *) req->iv),
+ cpu_to_le32p((u32 *) (req->iv + 4)),
cpu_to_le32(1) };
return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
- req->nbytes, iv, AES_IV_SIZE,
- ctx->sa_out, ctx->sa_len, 0);
+ req->cryptlen, iv, AES_IV_SIZE,
+ ctx->sa_out, ctx->sa_len, 0, NULL);
}
-int crypto4xx_rfc3686_decrypt(struct ablkcipher_request *req)
+int crypto4xx_rfc3686_decrypt(struct skcipher_request *req)
{
- struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
__le32 iv[AES_IV_SIZE / 4] = {
ctx->iv_nonce,
- cpu_to_le32p((u32 *) req->info),
- cpu_to_le32p((u32 *) (req->info + 4)),
+ cpu_to_le32p((u32 *) req->iv),
+ cpu_to_le32p((u32 *) (req->iv + 4)),
cpu_to_le32(1) };
return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
- req->nbytes, iv, AES_IV_SIZE,
- ctx->sa_out, ctx->sa_len, 0);
+ req->cryptlen, iv, AES_IV_SIZE,
+ ctx->sa_out, ctx->sa_len, 0, NULL);
+}
+
+static int
+crypto4xx_ctr_crypt(struct skcipher_request *req, bool encrypt)
+{
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
+ size_t iv_len = crypto_skcipher_ivsize(cipher);
+ unsigned int counter = be32_to_cpup((__be32 *)(req->iv + iv_len - 4));
+ unsigned int nblks = ALIGN(req->cryptlen, AES_BLOCK_SIZE) /
+ AES_BLOCK_SIZE;
+
+ /*
+ * The hardware uses only the last 32-bits as the counter while the
+ * kernel tests (aes_ctr_enc_tv_template[4] for example) expect that
+ * the whole IV is a counter. So fall back if the counter is going to
+ * overflow.
+ */
+ if (counter + nblks < counter) {
+ struct skcipher_request *subreq = skcipher_request_ctx(req);
+ int ret;
+
+ skcipher_request_set_tfm(subreq, ctx->sw_cipher.cipher);
+ skcipher_request_set_callback(subreq, req->base.flags,
+ NULL, NULL);
+ skcipher_request_set_crypt(subreq, req->src, req->dst,
+ req->cryptlen, req->iv);
+ ret = encrypt ? crypto_skcipher_encrypt(subreq)
+ : crypto_skcipher_decrypt(subreq);
+ skcipher_request_zero(subreq);
+ return ret;
+ }
+
+ return encrypt ? crypto4xx_encrypt_iv(req)
+ : crypto4xx_decrypt_iv(req);
+}
+
+static int crypto4xx_sk_setup_fallback(struct crypto4xx_ctx *ctx,
+ struct crypto_skcipher *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ int rc;
+
+ crypto_skcipher_clear_flags(ctx->sw_cipher.cipher,
+ CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(ctx->sw_cipher.cipher,
+ crypto_skcipher_get_flags(cipher) & CRYPTO_TFM_REQ_MASK);
+ rc = crypto_skcipher_setkey(ctx->sw_cipher.cipher, key, keylen);
+ crypto_skcipher_clear_flags(cipher, CRYPTO_TFM_RES_MASK);
+ crypto_skcipher_set_flags(cipher,
+ crypto_skcipher_get_flags(ctx->sw_cipher.cipher) &
+ CRYPTO_TFM_RES_MASK);
+
+ return rc;
+}
+
+int crypto4xx_setkey_aes_ctr(struct crypto_skcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
+ int rc;
+
+ rc = crypto4xx_sk_setup_fallback(ctx, cipher, key, keylen);
+ if (rc)
+ return rc;
+
+ return crypto4xx_setkey_aes(cipher, key, keylen,
+ CRYPTO_MODE_CTR, CRYPTO_FEEDBACK_MODE_NO_FB);
+}
+
+int crypto4xx_encrypt_ctr(struct skcipher_request *req)
+{
+ return crypto4xx_ctr_crypt(req, true);
+}
+
+int crypto4xx_decrypt_ctr(struct skcipher_request *req)
+{
+ return crypto4xx_ctr_crypt(req, false);
}
static inline bool crypto4xx_aead_need_fallback(struct aead_request *req,
+ unsigned int len,
bool is_ccm, bool decrypt)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
@@ -242,14 +331,14 @@ static inline bool crypto4xx_aead_need_fallback(struct aead_request *req,
return true;
/*
- * hardware does not handle cases where cryptlen
- * is less than a block
+ * hardware does not handle cases where plaintext
+ * is less than a block.
*/
- if (req->cryptlen < AES_BLOCK_SIZE)
+ if (len < AES_BLOCK_SIZE)
return true;
- /* assoc len needs to be a multiple of 4 */
- if (req->assoclen & 0x3)
+ /* assoc len needs to be a multiple of 4 and <= 1020 */
+ if (req->assoclen & 0x3 || req->assoclen > 1020)
return true;
/* CCM supports only counter field length of 2 and 4 bytes */
@@ -262,13 +351,7 @@ static inline bool crypto4xx_aead_need_fallback(struct aead_request *req,
static int crypto4xx_aead_fallback(struct aead_request *req,
struct crypto4xx_ctx *ctx, bool do_decrypt)
{
- char aead_req_data[sizeof(struct aead_request) +
- crypto_aead_reqsize(ctx->sw_cipher.aead)]
- __aligned(__alignof__(struct aead_request));
-
- struct aead_request *subreq = (void *) aead_req_data;
-
- memset(subreq, 0, sizeof(aead_req_data));
+ struct aead_request *subreq = aead_request_ctx(req);
aead_request_set_tfm(subreq, ctx->sw_cipher.aead);
aead_request_set_callback(subreq, req->base.flags,
@@ -280,10 +363,10 @@ static int crypto4xx_aead_fallback(struct aead_request *req,
crypto_aead_encrypt(subreq);
}
-static int crypto4xx_setup_fallback(struct crypto4xx_ctx *ctx,
- struct crypto_aead *cipher,
- const u8 *key,
- unsigned int keylen)
+static int crypto4xx_aead_setup_fallback(struct crypto4xx_ctx *ctx,
+ struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
{
int rc;
@@ -311,7 +394,7 @@ int crypto4xx_setkey_aes_ccm(struct crypto_aead *cipher, const u8 *key,
struct dynamic_sa_ctl *sa;
int rc = 0;
- rc = crypto4xx_setup_fallback(ctx, cipher, key, keylen);
+ rc = crypto4xx_aead_setup_fallback(ctx, cipher, key, keylen);
if (rc)
return rc;
@@ -366,19 +449,20 @@ int crypto4xx_setkey_aes_ccm(struct crypto_aead *cipher, const u8 *key,
static int crypto4xx_crypt_aes_ccm(struct aead_request *req, bool decrypt)
{
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto4xx_aead_reqctx *rctx = aead_request_ctx(req);
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- unsigned int len = req->cryptlen;
__le32 iv[16];
- u32 tmp_sa[ctx->sa_len * 4];
+ u32 tmp_sa[SA_AES128_CCM_LEN + 4];
struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *)tmp_sa;
-
- if (crypto4xx_aead_need_fallback(req, true, decrypt))
- return crypto4xx_aead_fallback(req, ctx, decrypt);
+ unsigned int len = req->cryptlen;
if (decrypt)
len -= crypto_aead_authsize(aead);
- memcpy(tmp_sa, decrypt ? ctx->sa_in : ctx->sa_out, sizeof(tmp_sa));
+ if (crypto4xx_aead_need_fallback(req, len, true, decrypt))
+ return crypto4xx_aead_fallback(req, ctx, decrypt);
+
+ memcpy(tmp_sa, decrypt ? ctx->sa_in : ctx->sa_out, ctx->sa_len * 4);
sa->sa_command_0.bf.digest_len = crypto_aead_authsize(aead) >> 2;
if (req->iv[0] == 1) {
@@ -391,7 +475,7 @@ static int crypto4xx_crypt_aes_ccm(struct aead_request *req, bool decrypt)
return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
len, iv, sizeof(iv),
- sa, ctx->sa_len, req->assoclen);
+ sa, ctx->sa_len, req->assoclen, rctx->dst);
}
int crypto4xx_encrypt_aes_ccm(struct aead_request *req)
@@ -470,7 +554,7 @@ int crypto4xx_setkey_aes_gcm(struct crypto_aead *cipher,
return -EINVAL;
}
- rc = crypto4xx_setup_fallback(ctx, cipher, key, keylen);
+ rc = crypto4xx_aead_setup_fallback(ctx, cipher, key, keylen);
if (rc)
return rc;
@@ -523,22 +607,23 @@ static inline int crypto4xx_crypt_aes_gcm(struct aead_request *req,
bool decrypt)
{
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- unsigned int len = req->cryptlen;
+ struct crypto4xx_aead_reqctx *rctx = aead_request_ctx(req);
__le32 iv[4];
+ unsigned int len = req->cryptlen;
+
+ if (decrypt)
+ len -= crypto_aead_authsize(crypto_aead_reqtfm(req));
- if (crypto4xx_aead_need_fallback(req, false, decrypt))
+ if (crypto4xx_aead_need_fallback(req, len, false, decrypt))
return crypto4xx_aead_fallback(req, ctx, decrypt);
crypto4xx_memcpy_to_le32(iv, req->iv, GCM_AES_IV_SIZE);
iv[3] = cpu_to_le32(1);
- if (decrypt)
- len -= crypto_aead_authsize(crypto_aead_reqtfm(req));
-
return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
len, iv, sizeof(iv),
decrypt ? ctx->sa_in : ctx->sa_out,
- ctx->sa_len, req->assoclen);
+ ctx->sa_len, req->assoclen, rctx->dst);
}
int crypto4xx_encrypt_aes_gcm(struct aead_request *req)
@@ -623,7 +708,7 @@ int crypto4xx_hash_update(struct ahash_request *req)
return crypto4xx_build_pd(&req->base, ctx, req->src, &dst,
req->nbytes, NULL, 0, ctx->sa_in,
- ctx->sa_len, 0);
+ ctx->sa_len, 0, NULL);
}
int crypto4xx_hash_final(struct ahash_request *req)
@@ -642,7 +727,7 @@ int crypto4xx_hash_digest(struct ahash_request *req)
return crypto4xx_build_pd(&req->base, ctx, req->src, &dst,
req->nbytes, NULL, 0, ctx->sa_in,
- ctx->sa_len, 0);
+ ctx->sa_len, 0, NULL);
}
/**
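The crypto4xx_ctr_crypt() hunk above falls back to the software ctr(aes) whenever the low 32-bit counter word would wrap during a request, since the engine increments only that word while the kernel's test vectors treat the whole IV as a counter. A standalone restatement of that check, under the assumption of a full 16-byte AES counter block; ctr32_would_wrap is a made-up helper name, not a driver function (the driver computes the block count with ALIGN()/AES_BLOCK_SIZE, which is equivalent to DIV_ROUND_UP here).

#include <crypto/aes.h>
#include <linux/kernel.h>

static bool ctr32_would_wrap(const u8 *iv, unsigned int cryptlen)
{
	/* Hardware only increments the last 32-bit word of the IV. */
	u32 counter = be32_to_cpup((const __be32 *)(iv + AES_BLOCK_SIZE - 4));
	u32 nblks = DIV_ROUND_UP(cryptlen, AES_BLOCK_SIZE);

	/* Unsigned addition wraps, so the sum compares below "counter". */
	return counter + nblks < counter;
}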
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 76f459ad2821..9cb234c72549 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -41,6 +41,7 @@
#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/scatterwalk.h>
+#include <crypto/skcipher.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include "crypto4xx_reg_def.h"
@@ -526,31 +527,38 @@ static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev,
}
}
-static void crypto4xx_ablkcipher_done(struct crypto4xx_device *dev,
+static void crypto4xx_cipher_done(struct crypto4xx_device *dev,
struct pd_uinfo *pd_uinfo,
struct ce_pd *pd)
{
- struct crypto4xx_ctx *ctx;
- struct ablkcipher_request *ablk_req;
+ struct skcipher_request *req;
struct scatterlist *dst;
dma_addr_t addr;
- ablk_req = ablkcipher_request_cast(pd_uinfo->async_req);
- ctx = crypto_tfm_ctx(ablk_req->base.tfm);
+ req = skcipher_request_cast(pd_uinfo->async_req);
if (pd_uinfo->using_sd) {
- crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo, ablk_req->nbytes,
- ablk_req->dst);
+ crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
+ req->cryptlen, req->dst);
} else {
dst = pd_uinfo->dest_va;
addr = dma_map_page(dev->core_dev->device, sg_page(dst),
dst->offset, dst->length, DMA_FROM_DEVICE);
}
+
+ if (pd_uinfo->sa_va->sa_command_0.bf.save_iv == SA_SAVE_IV) {
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+
+ crypto4xx_memcpy_from_le32((u32 *)req->iv,
+ pd_uinfo->sr_va->save_iv,
+ crypto_skcipher_ivsize(skcipher));
+ }
+
crypto4xx_ret_sg_desc(dev, pd_uinfo);
if (pd_uinfo->state & PD_ENTRY_BUSY)
- ablkcipher_request_complete(ablk_req, -EINPROGRESS);
- ablkcipher_request_complete(ablk_req, 0);
+ skcipher_request_complete(req, -EINPROGRESS);
+ skcipher_request_complete(req, 0);
}
static void crypto4xx_ahash_done(struct crypto4xx_device *dev,
@@ -580,7 +588,7 @@ static void crypto4xx_aead_done(struct crypto4xx_device *dev,
struct scatterlist *dst = pd_uinfo->dest_va;
size_t cp_len = crypto_aead_authsize(
crypto_aead_reqtfm(aead_req));
- u32 icv[cp_len];
+ u32 icv[AES_BLOCK_SIZE];
int err = 0;
if (pd_uinfo->using_sd) {
@@ -595,7 +603,7 @@ static void crypto4xx_aead_done(struct crypto4xx_device *dev,
if (pd_uinfo->sa_va->sa_command_0.bf.dir == DIR_OUTBOUND) {
/* append icv at the end */
crypto4xx_memcpy_from_le32(icv, pd_uinfo->sr_va->save_digest,
- cp_len);
+ sizeof(icv));
scatterwalk_map_and_copy(icv, dst, aead_req->cryptlen,
cp_len, 1);
@@ -605,7 +613,7 @@ static void crypto4xx_aead_done(struct crypto4xx_device *dev,
aead_req->assoclen + aead_req->cryptlen -
cp_len, cp_len, 0);
- crypto4xx_memcpy_from_le32(icv, icv, cp_len);
+ crypto4xx_memcpy_from_le32(icv, icv, sizeof(icv));
if (crypto_memneq(icv, pd_uinfo->sr_va->save_digest, cp_len))
err = -EBADMSG;
@@ -641,8 +649,8 @@ static void crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];
switch (crypto_tfm_alg_type(pd_uinfo->async_req->tfm)) {
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- crypto4xx_ablkcipher_done(dev, pd_uinfo, pd);
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ crypto4xx_cipher_done(dev, pd_uinfo, pd);
break;
case CRYPTO_ALG_TYPE_AEAD:
crypto4xx_aead_done(dev, pd_uinfo, pd);
@@ -687,9 +695,9 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
const __le32 *iv, const u32 iv_len,
const struct dynamic_sa_ctl *req_sa,
const unsigned int sa_len,
- const unsigned int assoclen)
+ const unsigned int assoclen,
+ struct scatterlist *_dst)
{
- struct scatterlist _dst[2];
struct crypto4xx_device *dev = ctx->dev;
struct dynamic_sa_ctl *sa;
struct ce_gd *gd;
@@ -936,15 +944,27 @@ static void crypto4xx_ctx_init(struct crypto4xx_alg *amcc_alg,
ctx->sa_len = 0;
}
-static int crypto4xx_ablk_init(struct crypto_tfm *tfm)
+static int crypto4xx_sk_init(struct crypto_skcipher *sk)
{
- struct crypto_alg *alg = tfm->__crt_alg;
+ struct skcipher_alg *alg = crypto_skcipher_alg(sk);
struct crypto4xx_alg *amcc_alg;
- struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(sk);
+
+ if (alg->base.cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
+ ctx->sw_cipher.cipher =
+ crypto_alloc_skcipher(alg->base.cra_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(ctx->sw_cipher.cipher))
+ return PTR_ERR(ctx->sw_cipher.cipher);
+
+ crypto_skcipher_set_reqsize(sk,
+ sizeof(struct skcipher_request) + 32 +
+ crypto_skcipher_reqsize(ctx->sw_cipher.cipher));
+ }
amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.cipher);
crypto4xx_ctx_init(amcc_alg, ctx);
- tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx);
return 0;
}
@@ -953,9 +973,13 @@ static void crypto4xx_common_exit(struct crypto4xx_ctx *ctx)
crypto4xx_free_sa(ctx);
}
-static void crypto4xx_ablk_exit(struct crypto_tfm *tfm)
+static void crypto4xx_sk_exit(struct crypto_skcipher *sk)
{
- crypto4xx_common_exit(crypto_tfm_ctx(tfm));
+ struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(sk);
+
+ crypto4xx_common_exit(ctx);
+ if (ctx->sw_cipher.cipher)
+ crypto_free_skcipher(ctx->sw_cipher.cipher);
}
static int crypto4xx_aead_init(struct crypto_aead *tfm)
@@ -972,9 +996,9 @@ static int crypto4xx_aead_init(struct crypto_aead *tfm)
amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.aead);
crypto4xx_ctx_init(amcc_alg, ctx);
- crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) +
- max(sizeof(struct crypto4xx_ctx), 32 +
- crypto_aead_reqsize(ctx->sw_cipher.aead)));
+ crypto_aead_set_reqsize(tfm, max(sizeof(struct aead_request) + 32 +
+ crypto_aead_reqsize(ctx->sw_cipher.aead),
+ sizeof(struct crypto4xx_aead_reqctx)));
return 0;
}
@@ -1012,7 +1036,7 @@ static int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
break;
default:
- rc = crypto_register_alg(&alg->alg.u.cipher);
+ rc = crypto_register_skcipher(&alg->alg.u.cipher);
break;
}
@@ -1041,7 +1065,7 @@ static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
break;
default:
- crypto_unregister_alg(&alg->alg.u.cipher);
+ crypto_unregister_skcipher(&alg->alg.u.cipher);
}
kfree(alg);
}
@@ -1103,126 +1127,131 @@ static irqreturn_t crypto4xx_ce_interrupt_handler_revb(int irq, void *data)
*/
static struct crypto4xx_alg_common crypto4xx_alg[] = {
/* Crypto AES modes */
- { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-ppc4xx",
- .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct crypto4xx_ctx),
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = crypto4xx_ablk_init,
- .cra_exit = crypto4xx_ablk_exit,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_IV_SIZE,
- .setkey = crypto4xx_setkey_aes_cbc,
- .encrypt = crypto4xx_encrypt,
- .decrypt = crypto4xx_decrypt,
- }
- }
- }},
- { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
- .cra_name = "cfb(aes)",
- .cra_driver_name = "cfb-aes-ppc4xx",
- .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct crypto4xx_ctx),
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = crypto4xx_ablk_init,
- .cra_exit = crypto4xx_ablk_exit,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_IV_SIZE,
- .setkey = crypto4xx_setkey_aes_cfb,
- .encrypt = crypto4xx_encrypt,
- .decrypt = crypto4xx_decrypt,
- }
- }
+ { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_IV_SIZE,
+ .setkey = crypto4xx_setkey_aes_cbc,
+ .encrypt = crypto4xx_encrypt_iv,
+ .decrypt = crypto4xx_decrypt_iv,
+ .init = crypto4xx_sk_init,
+ .exit = crypto4xx_sk_exit,
} },
- { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
- .cra_name = "rfc3686(ctr(aes))",
- .cra_driver_name = "rfc3686-ctr-aes-ppc4xx",
- .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct crypto4xx_ctx),
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = crypto4xx_ablk_init,
- .cra_exit = crypto4xx_ablk_exit,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE +
- CTR_RFC3686_NONCE_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE +
- CTR_RFC3686_NONCE_SIZE,
- .ivsize = CTR_RFC3686_IV_SIZE,
- .setkey = crypto4xx_setkey_rfc3686,
- .encrypt = crypto4xx_rfc3686_encrypt,
- .decrypt = crypto4xx_rfc3686_decrypt,
- }
- }
+ { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
+ .base = {
+ .cra_name = "cfb(aes)",
+ .cra_driver_name = "cfb-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_IV_SIZE,
+ .setkey = crypto4xx_setkey_aes_cfb,
+ .encrypt = crypto4xx_encrypt_iv,
+ .decrypt = crypto4xx_decrypt_iv,
+ .init = crypto4xx_sk_init,
+ .exit = crypto4xx_sk_exit,
} },
- { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-ppc4xx",
- .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct crypto4xx_ctx),
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = crypto4xx_ablk_init,
- .cra_exit = crypto4xx_ablk_exit,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = crypto4xx_setkey_aes_ecb,
- .encrypt = crypto4xx_encrypt,
- .decrypt = crypto4xx_decrypt,
- }
- }
+ { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
+ .base = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_IV_SIZE,
+ .setkey = crypto4xx_setkey_aes_ctr,
+ .encrypt = crypto4xx_encrypt_ctr,
+ .decrypt = crypto4xx_decrypt_ctr,
+ .init = crypto4xx_sk_init,
+ .exit = crypto4xx_sk_exit,
} },
- { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
- .cra_name = "ofb(aes)",
- .cra_driver_name = "ofb-aes-ppc4xx",
- .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct crypto4xx_ctx),
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = crypto4xx_ablk_init,
- .cra_exit = crypto4xx_ablk_exit,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_IV_SIZE,
- .setkey = crypto4xx_setkey_aes_ofb,
- .encrypt = crypto4xx_encrypt,
- .decrypt = crypto4xx_decrypt,
- }
- }
+ { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
+ .base = {
+ .cra_name = "rfc3686(ctr(aes))",
+ .cra_driver_name = "rfc3686-ctr-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .setkey = crypto4xx_setkey_rfc3686,
+ .encrypt = crypto4xx_rfc3686_encrypt,
+ .decrypt = crypto4xx_rfc3686_decrypt,
+ .init = crypto4xx_sk_init,
+ .exit = crypto4xx_sk_exit,
+ } },
+ { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
+ .base = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = crypto4xx_setkey_aes_ecb,
+ .encrypt = crypto4xx_encrypt_noiv,
+ .decrypt = crypto4xx_decrypt_noiv,
+ .init = crypto4xx_sk_init,
+ .exit = crypto4xx_sk_exit,
+ } },
+ { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
+ .base = {
+ .cra_name = "ofb(aes)",
+ .cra_driver_name = "ofb-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_IV_SIZE,
+ .setkey = crypto4xx_setkey_aes_ofb,
+ .encrypt = crypto4xx_encrypt_iv,
+ .decrypt = crypto4xx_decrypt_iv,
+ .init = crypto4xx_sk_init,
+ .exit = crypto4xx_sk_exit,
} },
/* AEAD */
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index 23b726da6534..e2ca56722f07 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -25,6 +25,7 @@
#include <linux/ratelimit.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
#include "crypto4xx_reg_def.h"
#include "crypto4xx_sa.h"
@@ -127,14 +128,19 @@ struct crypto4xx_ctx {
__le32 iv_nonce;
u32 sa_len;
union {
+ struct crypto_skcipher *cipher;
struct crypto_aead *aead;
} sw_cipher;
};
+struct crypto4xx_aead_reqctx {
+ struct scatterlist dst[2];
+};
+
struct crypto4xx_alg_common {
u32 type;
union {
- struct crypto_alg cipher;
+ struct skcipher_alg cipher;
struct ahash_alg hash;
struct aead_alg aead;
} u;
@@ -157,21 +163,28 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
const __le32 *iv, const u32 iv_len,
const struct dynamic_sa_ctl *sa,
const unsigned int sa_len,
- const unsigned int assoclen);
-int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher,
+ const unsigned int assoclen,
+ struct scatterlist *dst_tmp);
+int crypto4xx_setkey_aes_cbc(struct crypto_skcipher *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_setkey_aes_cfb(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen);
-int crypto4xx_setkey_aes_cfb(struct crypto_ablkcipher *cipher,
+int crypto4xx_setkey_aes_ctr(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen);
-int crypto4xx_setkey_aes_ecb(struct crypto_ablkcipher *cipher,
+int crypto4xx_setkey_aes_ecb(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen);
-int crypto4xx_setkey_aes_ofb(struct crypto_ablkcipher *cipher,
+int crypto4xx_setkey_aes_ofb(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen);
-int crypto4xx_setkey_rfc3686(struct crypto_ablkcipher *cipher,
+int crypto4xx_setkey_rfc3686(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen);
-int crypto4xx_encrypt(struct ablkcipher_request *req);
-int crypto4xx_decrypt(struct ablkcipher_request *req);
-int crypto4xx_rfc3686_encrypt(struct ablkcipher_request *req);
-int crypto4xx_rfc3686_decrypt(struct ablkcipher_request *req);
+int crypto4xx_encrypt_ctr(struct skcipher_request *req);
+int crypto4xx_decrypt_ctr(struct skcipher_request *req);
+int crypto4xx_encrypt_iv(struct skcipher_request *req);
+int crypto4xx_decrypt_iv(struct skcipher_request *req);
+int crypto4xx_encrypt_noiv(struct skcipher_request *req);
+int crypto4xx_decrypt_noiv(struct skcipher_request *req);
+int crypto4xx_rfc3686_encrypt(struct skcipher_request *req);
+int crypto4xx_rfc3686_decrypt(struct skcipher_request *req);
int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);
int crypto4xx_hash_digest(struct ahash_request *req);
int crypto4xx_hash_final(struct ahash_request *req);
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 7207a535942d..d67667970f7e 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -769,15 +769,18 @@ struct aead_edesc {
* @src_nents: number of segments in input s/w scatterlist
* @dst_nents: number of segments in output s/w scatterlist
* @iv_dma: dma address of iv for checking continuity and link table
+ * @iv_dir: DMA mapping direction for IV
* @sec4_sg_bytes: length of dma mapped sec4_sg space
* @sec4_sg_dma: bus physical mapped address of h/w link table
* @sec4_sg: pointer to h/w link table
* @hw_desc: the h/w job descriptor followed by any referenced link tables
+ * and IV
*/
struct ablkcipher_edesc {
int src_nents;
int dst_nents;
dma_addr_t iv_dma;
+ enum dma_data_direction iv_dir;
int sec4_sg_bytes;
dma_addr_t sec4_sg_dma;
struct sec4_sg_entry *sec4_sg;
@@ -787,7 +790,8 @@ struct ablkcipher_edesc {
static void caam_unmap(struct device *dev, struct scatterlist *src,
struct scatterlist *dst, int src_nents,
int dst_nents,
- dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
+ dma_addr_t iv_dma, int ivsize,
+ enum dma_data_direction iv_dir, dma_addr_t sec4_sg_dma,
int sec4_sg_bytes)
{
if (dst != src) {
@@ -799,7 +803,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
}
if (iv_dma)
- dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+ dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
if (sec4_sg_bytes)
dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
DMA_TO_DEVICE);
@@ -810,7 +814,7 @@ static void aead_unmap(struct device *dev,
struct aead_request *req)
{
caam_unmap(dev, req->src, req->dst,
- edesc->src_nents, edesc->dst_nents, 0, 0,
+ edesc->src_nents, edesc->dst_nents, 0, 0, DMA_NONE,
edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}
@@ -823,7 +827,7 @@ static void ablkcipher_unmap(struct device *dev,
caam_unmap(dev, req->src, req->dst,
edesc->src_nents, edesc->dst_nents,
- edesc->iv_dma, ivsize,
+ edesc->iv_dma, ivsize, edesc->iv_dir,
edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}
@@ -912,6 +916,18 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
ivsize, 0);
+ /* In case initial IV was generated, copy it in GIVCIPHER request */
+ if (edesc->iv_dir == DMA_FROM_DEVICE) {
+ u8 *iv;
+ struct skcipher_givcrypt_request *greq;
+
+ greq = container_of(req, struct skcipher_givcrypt_request,
+ creq);
+ iv = (u8 *)edesc->hw_desc + desc_bytes(edesc->hw_desc) +
+ edesc->sec4_sg_bytes;
+ memcpy(greq->giv, iv, ivsize);
+ }
+
kfree(edesc);
ablkcipher_request_complete(req, err);
@@ -922,10 +938,10 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
{
struct ablkcipher_request *req = context;
struct ablkcipher_edesc *edesc;
+#ifdef DEBUG
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
-#ifdef DEBUG
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
@@ -943,14 +959,6 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
ablkcipher_unmap(jrdev, edesc, req);
-
- /*
- * The crypto API expects us to set the IV (req->info) to the last
- * ciphertext block.
- */
- scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize,
- ivsize, 0);
-
kfree(edesc);
ablkcipher_request_complete(req, err);
@@ -1099,15 +1107,14 @@ static void init_authenc_job(struct aead_request *req,
*/
static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
struct ablkcipher_edesc *edesc,
- struct ablkcipher_request *req,
- bool iv_contig)
+ struct ablkcipher_request *req)
{
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
u32 *desc = edesc->hw_desc;
- u32 out_options = 0, in_options;
- dma_addr_t dst_dma, src_dma;
- int len, sec4_sg_index = 0;
+ u32 out_options = 0;
+ dma_addr_t dst_dma;
+ int len;
#ifdef DEBUG
print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
@@ -1123,30 +1130,18 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
len = desc_len(sh_desc);
init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
- if (iv_contig) {
- src_dma = edesc->iv_dma;
- in_options = 0;
- } else {
- src_dma = edesc->sec4_sg_dma;
- sec4_sg_index += edesc->src_nents + 1;
- in_options = LDST_SGF;
- }
- append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
+ append_seq_in_ptr(desc, edesc->sec4_sg_dma, req->nbytes + ivsize,
+ LDST_SGF);
if (likely(req->src == req->dst)) {
- if (edesc->src_nents == 1 && iv_contig) {
- dst_dma = sg_dma_address(req->src);
- } else {
- dst_dma = edesc->sec4_sg_dma +
- sizeof(struct sec4_sg_entry);
- out_options = LDST_SGF;
- }
+ dst_dma = edesc->sec4_sg_dma + sizeof(struct sec4_sg_entry);
+ out_options = LDST_SGF;
} else {
if (edesc->dst_nents == 1) {
dst_dma = sg_dma_address(req->dst);
} else {
- dst_dma = edesc->sec4_sg_dma +
- sec4_sg_index * sizeof(struct sec4_sg_entry);
+ dst_dma = edesc->sec4_sg_dma + (edesc->src_nents + 1) *
+ sizeof(struct sec4_sg_entry);
out_options = LDST_SGF;
}
}
@@ -1158,13 +1153,12 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
*/
static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
struct ablkcipher_edesc *edesc,
- struct ablkcipher_request *req,
- bool iv_contig)
+ struct ablkcipher_request *req)
{
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
u32 *desc = edesc->hw_desc;
- u32 out_options, in_options;
+ u32 in_options;
dma_addr_t dst_dma, src_dma;
int len, sec4_sg_index = 0;
@@ -1190,15 +1184,9 @@ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
}
append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);
- if (iv_contig) {
- dst_dma = edesc->iv_dma;
- out_options = 0;
- } else {
- dst_dma = edesc->sec4_sg_dma +
- sec4_sg_index * sizeof(struct sec4_sg_entry);
- out_options = LDST_SGF;
- }
- append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
+ dst_dma = edesc->sec4_sg_dma + sec4_sg_index *
+ sizeof(struct sec4_sg_entry);
+ append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, LDST_SGF);
}
/*
@@ -1287,7 +1275,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
GFP_DMA | flags);
if (!edesc) {
caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0);
+ 0, DMA_NONE, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -1491,8 +1479,7 @@ static int aead_decrypt(struct aead_request *req)
* allocate and map the ablkcipher extended descriptor for ablkcipher
*/
static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
- *req, int desc_bytes,
- bool *iv_contig_out)
+ *req, int desc_bytes)
{
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
@@ -1501,8 +1488,8 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
GFP_KERNEL : GFP_ATOMIC;
int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
struct ablkcipher_edesc *edesc;
- dma_addr_t iv_dma = 0;
- bool in_contig;
+ dma_addr_t iv_dma;
+ u8 *iv;
int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
@@ -1546,33 +1533,20 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
}
}
- iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, iv_dma)) {
- dev_err(jrdev, "unable to map IV\n");
- caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0);
- return ERR_PTR(-ENOMEM);
- }
-
- if (mapped_src_nents == 1 &&
- iv_dma + ivsize == sg_dma_address(req->src)) {
- in_contig = true;
- sec4_sg_ents = 0;
- } else {
- in_contig = false;
- sec4_sg_ents = 1 + mapped_src_nents;
- }
+ sec4_sg_ents = 1 + mapped_src_nents;
dst_sg_idx = sec4_sg_ents;
sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
- /* allocate space for base edesc and hw desc commands, link tables */
- edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
+ /*
+ * allocate space for base edesc and hw desc commands, link tables, IV
+ */
+ edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize,
GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
- caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, 0, 0);
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, DMA_NONE, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -1581,13 +1555,24 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
desc_bytes;
+ edesc->iv_dir = DMA_TO_DEVICE;
- if (!in_contig) {
- dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
- sg_to_sec4_sg_last(req->src, mapped_src_nents,
- edesc->sec4_sg + 1, 0);
+ /* Make sure IV is located in a DMAable area */
+ iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
+ memcpy(iv, req->info, ivsize);
+
+ iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, iv_dma)) {
+ dev_err(jrdev, "unable to map IV\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, DMA_NONE, 0, 0);
+ kfree(edesc);
+ return ERR_PTR(-ENOMEM);
}
+ dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
+ sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg + 1, 0);
+
if (mapped_dst_nents > 1) {
sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
edesc->sec4_sg + dst_sg_idx, 0);
@@ -1598,7 +1583,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
dev_err(jrdev, "unable to map S/G table\n");
caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, 0, 0);
+ iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
kfree(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1611,7 +1596,6 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
sec4_sg_bytes, 1);
#endif
- *iv_contig_out = in_contig;
return edesc;
}
@@ -1621,19 +1605,16 @@ static int ablkcipher_encrypt(struct ablkcipher_request *req)
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
struct device *jrdev = ctx->jrdev;
- bool iv_contig;
u32 *desc;
int ret = 0;
/* allocate extended descriptor */
- edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
- CAAM_CMD_SZ, &iv_contig);
+ edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
/* Create and submit job descriptor*/
- init_ablkcipher_job(ctx->sh_desc_enc,
- ctx->sh_desc_enc_dma, edesc, req, iv_contig);
+ init_ablkcipher_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
@@ -1657,20 +1638,25 @@ static int ablkcipher_decrypt(struct ablkcipher_request *req)
struct ablkcipher_edesc *edesc;
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
struct device *jrdev = ctx->jrdev;
- bool iv_contig;
u32 *desc;
int ret = 0;
/* allocate extended descriptor */
- edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
- CAAM_CMD_SZ, &iv_contig);
+ edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
+ /*
+ * The crypto API expects us to set the IV (req->info) to the last
+ * ciphertext block.
+ */
+ scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize,
+ ivsize, 0);
+
/* Create and submit job descriptor*/
- init_ablkcipher_job(ctx->sh_desc_dec,
- ctx->sh_desc_dec_dma, edesc, req, iv_contig);
+ init_ablkcipher_job(ctx->sh_desc_dec, ctx->sh_desc_dec_dma, edesc, req);
desc = edesc->hw_desc;
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
@@ -1695,8 +1681,7 @@ static int ablkcipher_decrypt(struct ablkcipher_request *req)
*/
static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
struct skcipher_givcrypt_request *greq,
- int desc_bytes,
- bool *iv_contig_out)
+ int desc_bytes)
{
struct ablkcipher_request *req = &greq->creq;
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
@@ -1706,8 +1691,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
GFP_KERNEL : GFP_ATOMIC;
int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
struct ablkcipher_edesc *edesc;
- dma_addr_t iv_dma = 0;
- bool out_contig;
+ dma_addr_t iv_dma;
+ u8 *iv;
int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
@@ -1752,36 +1737,20 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
}
}
- /*
- * Check if iv can be contiguous with source and destination.
- * If so, include it. If not, create scatterlist.
- */
- iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, iv_dma)) {
- dev_err(jrdev, "unable to map IV\n");
- caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0);
- return ERR_PTR(-ENOMEM);
- }
-
sec4_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
dst_sg_idx = sec4_sg_ents;
- if (mapped_dst_nents == 1 &&
- iv_dma + ivsize == sg_dma_address(req->dst)) {
- out_contig = true;
- } else {
- out_contig = false;
- sec4_sg_ents += 1 + mapped_dst_nents;
- }
+ sec4_sg_ents += 1 + mapped_dst_nents;
- /* allocate space for base edesc and hw desc commands, link tables */
+ /*
+ * allocate space for base edesc and hw desc commands, link tables, IV
+ */
sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
- edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
+ edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize,
GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
- caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, 0, 0);
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, DMA_NONE, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -1790,24 +1759,33 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
desc_bytes;
+ edesc->iv_dir = DMA_FROM_DEVICE;
+
+ /* Make sure IV is located in a DMAable area */
+ iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
+ iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_FROM_DEVICE);
+ if (dma_mapping_error(jrdev, iv_dma)) {
+ dev_err(jrdev, "unable to map IV\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, DMA_NONE, 0, 0);
+ kfree(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
if (mapped_src_nents > 1)
sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg,
0);
- if (!out_contig) {
- dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx,
- iv_dma, ivsize, 0);
- sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
- edesc->sec4_sg + dst_sg_idx + 1, 0);
- }
+ dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx, iv_dma, ivsize, 0);
+ sg_to_sec4_sg_last(req->dst, mapped_dst_nents, edesc->sec4_sg +
+ dst_sg_idx + 1, 0);
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
dev_err(jrdev, "unable to map S/G table\n");
caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, 0, 0);
+ iv_dma, ivsize, DMA_FROM_DEVICE, 0, 0);
kfree(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1820,7 +1798,6 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
sec4_sg_bytes, 1);
#endif
- *iv_contig_out = out_contig;
return edesc;
}
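The device-generated IV now lives at the tail of the same kzalloc'ed extended descriptor, after the hw descriptor and the sec4 S/G table, and is mapped DMA_FROM_DEVICE, so the IV is guaranteed to sit in DMA-able memory regardless of where the caller's buffer lives. A minimal, self-contained sketch of that offset arithmetic; the struct and field names are stand-ins, not the real caamalg layout:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for struct ablkcipher_edesc (not the real layout). */
struct edesc_sketch {
	int src_nents;
	int dst_nents;
	uint32_t hw_desc[];		/* job descriptor, desc_bytes long */
};

int main(void)
{
	size_t desc_bytes = 64, sec4_sg_bytes = 4 * 16, ivsize = 16;

	/* One allocation holds: edesc | hw_desc | S/G table | IV */
	size_t total = sizeof(struct edesc_sketch) + desc_bytes +
		       sec4_sg_bytes + ivsize;
	/* mirrors: iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes */
	size_t iv_off = sizeof(struct edesc_sketch) + desc_bytes + sec4_sg_bytes;

	printf("kzalloc %zu bytes, IV at offset %zu\n", total, iv_off);
	return 0;
}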
@@ -1831,19 +1808,17 @@ static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
struct device *jrdev = ctx->jrdev;
- bool iv_contig = false;
u32 *desc;
int ret = 0;
/* allocate extended descriptor */
- edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN *
- CAAM_CMD_SZ, &iv_contig);
+ edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
/* Create and submit job descriptor*/
init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
- edesc, req, iv_contig);
+ edesc, req);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
"ablkcipher jobdesc@" __stringify(__LINE__) ": ",
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
index 8ae7a1be7dfd..a408edd84f34 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -1093,7 +1093,7 @@ void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
(0x6 << MOVE_LEN_SHIFT));
write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
- (0x8 << MOVE_LEN_SHIFT));
+ (0x8 << MOVE_LEN_SHIFT) | MOVE_WAITCOMP);
/* Will read assoclen + cryptlen bytes */
append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
@@ -1178,7 +1178,7 @@ void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
(0x6 << MOVE_LEN_SHIFT));
write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
- (0x8 << MOVE_LEN_SHIFT));
+ (0x8 << MOVE_LEN_SHIFT) | MOVE_WAITCOMP);
/* Will read assoclen + cryptlen bytes */
append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index cacda0831390..6e61cc93c2b0 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -728,7 +728,7 @@ badkey:
* @assoclen: associated data length, in CAAM endianness
* @assoclen_dma: bus physical mapped address of req->assoclen
* @drv_req: driver-specific request structure
- * @sgt: the h/w link table
+ * @sgt: the h/w link table, followed by IV
*/
struct aead_edesc {
int src_nents;
@@ -739,9 +739,6 @@ struct aead_edesc {
unsigned int assoclen;
dma_addr_t assoclen_dma;
struct caam_drv_req drv_req;
-#define CAAM_QI_MAX_AEAD_SG \
- ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) / \
- sizeof(struct qm_sg_entry))
struct qm_sg_entry sgt[0];
};
@@ -753,7 +750,7 @@ struct aead_edesc {
* @qm_sg_bytes: length of dma mapped h/w link table
* @qm_sg_dma: bus physical mapped address of h/w link table
* @drv_req: driver-specific request structure
- * @sgt: the h/w link table
+ * @sgt: the h/w link table, followed by IV
*/
struct ablkcipher_edesc {
int src_nents;
@@ -762,9 +759,6 @@ struct ablkcipher_edesc {
int qm_sg_bytes;
dma_addr_t qm_sg_dma;
struct caam_drv_req drv_req;
-#define CAAM_QI_MAX_ABLKCIPHER_SG \
- ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
- sizeof(struct qm_sg_entry))
struct qm_sg_entry sgt[0];
};
@@ -986,17 +980,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
}
}
- if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) {
+ if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
ivsize = crypto_aead_ivsize(aead);
- iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
- if (dma_mapping_error(qidev, iv_dma)) {
- dev_err(qidev, "unable to map IV\n");
- caam_unmap(qidev, req->src, req->dst, src_nents,
- dst_nents, 0, 0, op_type, 0, 0);
- qi_cache_free(edesc);
- return ERR_PTR(-ENOMEM);
- }
- }
/*
* Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
@@ -1004,16 +989,33 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
*/
qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
(mapped_dst_nents > 1 ? mapped_dst_nents : 0);
- if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) {
- dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
- qm_sg_ents, CAAM_QI_MAX_AEAD_SG);
- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, op_type, 0, 0);
+ sg_table = &edesc->sgt[0];
+ qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
+ if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
+ CAAM_QI_MEMCACHE_SIZE)) {
+ dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
+ qm_sg_ents, ivsize);
+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
- sg_table = &edesc->sgt[0];
- qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
+
+ if (ivsize) {
+ u8 *iv = (u8 *)(sg_table + qm_sg_ents);
+
+ /* Make sure IV is located in a DMAable area */
+ memcpy(iv, req->iv, ivsize);
+
+ iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(qidev, iv_dma)) {
+ dev_err(qidev, "unable to map IV\n");
+ caam_unmap(qidev, req->src, req->dst, src_nents,
+ dst_nents, 0, 0, 0, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
edesc->src_nents = src_nents;
edesc->dst_nents = dst_nents;
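Instead of capping the S/G entry count with the precomputed CAAM_QI_MAX_AEAD_SG, the allocation is now checked directly against the qi_cache object size: header plus S/G table plus trailing IV must fit in CAAM_QI_MEMCACHE_SIZE. A standalone sketch of that bound, using illustrative constants in place of the driver's real sizes:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define MEMCACHE_SIZE	768	/* stand-in for CAAM_QI_MEMCACHE_SIZE */
#define SGT_OFFSET	64	/* stand-in for offsetof(struct aead_edesc, sgt) */
#define SG_ENTRY_SIZE	16	/* stand-in for sizeof(struct qm_sg_entry) */

static bool edesc_fits(int qm_sg_ents, int ivsize)
{
	return SGT_OFFSET + (size_t)qm_sg_ents * SG_ENTRY_SIZE + ivsize <=
	       MEMCACHE_SIZE;
}

int main(void)
{
	printf("40 entries + 16B IV fit: %s\n", edesc_fits(40, 16) ? "yes" : "no");
	printf("60 entries + 16B IV fit: %s\n", edesc_fits(60, 16) ? "yes" : "no");
	return 0;
}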
@@ -1166,15 +1168,27 @@ static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
#endif
ablkcipher_unmap(qidev, edesc, req);
- qi_cache_free(edesc);
+
+ /* In case initial IV was generated, copy it into the GIVCIPHER request */
+ if (edesc->drv_req.drv_ctx->op_type == GIVENCRYPT) {
+ u8 *iv;
+ struct skcipher_givcrypt_request *greq;
+
+ greq = container_of(req, struct skcipher_givcrypt_request,
+ creq);
+ iv = (u8 *)edesc->sgt + edesc->qm_sg_bytes;
+ memcpy(greq->giv, iv, ivsize);
+ }
/*
* The crypto API expects us to set the IV (req->info) to the last
* ciphertext block. This is used e.g. by the CTS mode.
*/
- scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
- ivsize, 0);
+ if (edesc->drv_req.drv_ctx->op_type != DECRYPT)
+ scatterwalk_map_and_copy(req->info, req->dst, req->nbytes -
+ ivsize, ivsize, 0);
+ qi_cache_free(edesc);
ablkcipher_request_complete(req, status);
}
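The completion callback now also copies the device-generated IV, stored right after the S/G table, into greq->giv for GIVENCRYPT, and refreshes req->info from the destination only for non-decrypt operations. The last-ciphertext-block convention behind that second branch, illustrated on a plain buffer (sizes are illustrative):

#include <stdio.h>
#include <string.h>

#define BS 16	/* AES block size */

int main(void)
{
	unsigned char ct[3 * BS] = { 0x3a };	/* pretend 3-block ciphertext */
	unsigned char iv[BS];

	/* What the scatterwalk_map_and_copy() above achieves: after the
	 * operation, req->info must hold the last ciphertext block, i.e.
	 * bytes [nbytes - ivsize, nbytes); CTS and chained CBC rely on it.
	 */
	memcpy(iv, ct + sizeof(ct) - BS, BS);
	printf("chained IV starts with 0x%02x\n", iv[0]);
	return 0;
}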
@@ -1189,9 +1203,9 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
struct ablkcipher_edesc *edesc;
dma_addr_t iv_dma;
- bool in_contig;
+ u8 *iv;
int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
- int dst_sg_idx, qm_sg_ents;
+ int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
struct qm_sg_entry *sg_table, *fd_sgt;
struct caam_drv_ctx *drv_ctx;
enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
@@ -1238,55 +1252,53 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
}
}
- iv_dma = dma_map_single(qidev, req->info, ivsize, DMA_TO_DEVICE);
- if (dma_mapping_error(qidev, iv_dma)) {
- dev_err(qidev, "unable to map IV\n");
- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0, 0);
- return ERR_PTR(-ENOMEM);
- }
-
- if (mapped_src_nents == 1 &&
- iv_dma + ivsize == sg_dma_address(req->src)) {
- in_contig = true;
- qm_sg_ents = 0;
- } else {
- in_contig = false;
- qm_sg_ents = 1 + mapped_src_nents;
- }
+ qm_sg_ents = 1 + mapped_src_nents;
dst_sg_idx = qm_sg_ents;
qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
- if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
- dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
- qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, op_type, 0, 0);
+ qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
+ if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
+ ivsize > CAAM_QI_MEMCACHE_SIZE)) {
+ dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
+ qm_sg_ents, ivsize);
+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0, 0);
return ERR_PTR(-ENOMEM);
}
- /* allocate space for base edesc and link tables */
+ /* allocate space for base edesc, link tables and IV */
edesc = qi_cache_alloc(GFP_DMA | flags);
if (unlikely(!edesc)) {
dev_err(qidev, "could not allocate extended descriptor\n");
- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, op_type, 0, 0);
+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0, 0);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* Make sure IV is located in a DMAable area */
+ sg_table = &edesc->sgt[0];
+ iv = (u8 *)(sg_table + qm_sg_ents);
+ memcpy(iv, req->info, ivsize);
+
+ iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(qidev, iv_dma)) {
+ dev_err(qidev, "unable to map IV\n");
+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0, 0);
+ qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
edesc->src_nents = src_nents;
edesc->dst_nents = dst_nents;
edesc->iv_dma = iv_dma;
- sg_table = &edesc->sgt[0];
- edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
+ edesc->qm_sg_bytes = qm_sg_bytes;
edesc->drv_req.app_ctx = req;
edesc->drv_req.cbk = ablkcipher_done;
edesc->drv_req.drv_ctx = drv_ctx;
- if (!in_contig) {
- dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
- sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
- }
+ dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
+ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
if (mapped_dst_nents > 1)
sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
@@ -1304,20 +1316,12 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
fd_sgt = &edesc->drv_req.fd_sgt[0];
- if (!in_contig)
- dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
- ivsize + req->nbytes, 0);
- else
- dma_to_qm_sg_one_last(&fd_sgt[1], iv_dma, ivsize + req->nbytes,
- 0);
+ dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
+ ivsize + req->nbytes, 0);
if (req->src == req->dst) {
- if (!in_contig)
- dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
- sizeof(*sg_table), req->nbytes, 0);
- else
- dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
- req->nbytes, 0);
+ dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
+ sizeof(*sg_table), req->nbytes, 0);
} else if (mapped_dst_nents > 1) {
dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
sizeof(*sg_table), req->nbytes, 0);
@@ -1341,10 +1345,10 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
struct ablkcipher_edesc *edesc;
dma_addr_t iv_dma;
- bool out_contig;
+ u8 *iv;
int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
struct qm_sg_entry *sg_table, *fd_sgt;
- int dst_sg_idx, qm_sg_ents;
+ int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
struct caam_drv_ctx *drv_ctx;
drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
@@ -1392,46 +1396,45 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
mapped_dst_nents = src_nents;
}
- iv_dma = dma_map_single(qidev, creq->giv, ivsize, DMA_FROM_DEVICE);
- if (dma_mapping_error(qidev, iv_dma)) {
- dev_err(qidev, "unable to map IV\n");
- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0, 0);
- return ERR_PTR(-ENOMEM);
- }
-
qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
dst_sg_idx = qm_sg_ents;
- if (mapped_dst_nents == 1 &&
- iv_dma + ivsize == sg_dma_address(req->dst)) {
- out_contig = true;
- } else {
- out_contig = false;
- qm_sg_ents += 1 + mapped_dst_nents;
- }
- if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
- dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
- qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, GIVENCRYPT, 0, 0);
+ qm_sg_ents += 1 + mapped_dst_nents;
+ qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
+ if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
+ ivsize > CAAM_QI_MEMCACHE_SIZE)) {
+ dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
+ qm_sg_ents, ivsize);
+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0, 0);
return ERR_PTR(-ENOMEM);
}
- /* allocate space for base edesc and link tables */
+ /* allocate space for base edesc, link tables and IV */
edesc = qi_cache_alloc(GFP_DMA | flags);
if (!edesc) {
dev_err(qidev, "could not allocate extended descriptor\n");
- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, GIVENCRYPT, 0, 0);
+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0, 0);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* Make sure IV is located in a DMAable area */
+ sg_table = &edesc->sgt[0];
+ iv = (u8 *)(sg_table + qm_sg_ents);
+ iv_dma = dma_map_single(qidev, iv, ivsize, DMA_FROM_DEVICE);
+ if (dma_mapping_error(qidev, iv_dma)) {
+ dev_err(qidev, "unable to map IV\n");
+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0, 0);
+ qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
edesc->src_nents = src_nents;
edesc->dst_nents = dst_nents;
edesc->iv_dma = iv_dma;
- sg_table = &edesc->sgt[0];
- edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
+ edesc->qm_sg_bytes = qm_sg_bytes;
edesc->drv_req.app_ctx = req;
edesc->drv_req.cbk = ablkcipher_done;
edesc->drv_req.drv_ctx = drv_ctx;
@@ -1439,11 +1442,9 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
if (mapped_src_nents > 1)
sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
- if (!out_contig) {
- dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
- sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
- dst_sg_idx + 1, 0);
- }
+ dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
+ sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + dst_sg_idx + 1,
+ 0);
edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
DMA_TO_DEVICE);
@@ -1464,13 +1465,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src),
req->nbytes, 0);
- if (!out_contig)
- dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
- sizeof(*sg_table), ivsize + req->nbytes,
- 0);
- else
- dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
- ivsize + req->nbytes, 0);
+ dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
+ sizeof(*sg_table), ivsize + req->nbytes, 0);
return edesc;
}
@@ -1480,6 +1476,7 @@ static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
struct ablkcipher_edesc *edesc;
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
int ret;
if (unlikely(caam_congested))
@@ -1490,6 +1487,14 @@ static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
if (IS_ERR(edesc))
return PTR_ERR(edesc);
+ /*
+ * The crypto API expects us to set the IV (req->info) to the last
+ * ciphertext block.
+ */
+ if (!encrypt)
+ scatterwalk_map_and_copy(req->info, req->src, req->nbytes -
+ ivsize, ivsize, 0);
+
ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
if (!ret) {
ret = -EINPROGRESS;
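Moving the req->info update for decryption here, before the job is enqueued, matters in particular for in-place requests: once the hardware has decrypted into the same buffer, the last ciphertext block is no longer available. A small self-contained illustration of that ordering (buffers and contents are made up):

#include <stdio.h>
#include <string.h>

#define BS 16

int main(void)
{
	unsigned char buf[2 * BS];	/* req->src == req->dst */
	unsigned char next_iv[BS];

	memset(buf, 0xc1, sizeof(buf));	/* pretend ciphertext */

	/* Snapshot the last ciphertext block before the in-place decrypt,
	 * as ablkcipher_crypt() now does for the !encrypt case.
	 */
	memcpy(next_iv, buf + sizeof(buf) - BS, BS);

	memset(buf, 0x00, sizeof(buf));	/* "decrypt" in place */
	printf("saved IV byte 0x%02x vs. overwritten 0x%02x\n",
	       next_iv[0], buf[sizeof(buf) - BS]);
	return 0;
}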
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 7a897209f181..578ea63a3109 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -66,7 +66,7 @@ static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
struct caam_rsa_key *key = &ctx->key;
struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
size_t p_sz = key->p_sz;
- size_t q_sz = key->p_sz;
+ size_t q_sz = key->q_sz;
dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
@@ -83,7 +83,7 @@ static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
struct caam_rsa_key *key = &ctx->key;
struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
size_t p_sz = key->p_sz;
- size_t q_sz = key->p_sz;
+ size_t q_sz = key->q_sz;
dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
@@ -166,18 +166,71 @@ static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
akcipher_request_complete(req, err);
}
+static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
+ unsigned int nbytes,
+ unsigned int flags)
+{
+ struct sg_mapping_iter miter;
+ int lzeros, ents;
+ unsigned int len;
+ unsigned int tbytes = nbytes;
+ const u8 *buff;
+
+ ents = sg_nents_for_len(sgl, nbytes);
+ if (ents < 0)
+ return ents;
+
+ sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags);
+
+ lzeros = 0;
+ len = 0;
+ while (nbytes > 0) {
+ while (len && !*buff) {
+ lzeros++;
+ len--;
+ buff++;
+ }
+
+ if (len && *buff)
+ break;
+
+ sg_miter_next(&miter);
+ buff = miter.addr;
+ len = miter.length;
+
+ nbytes -= lzeros;
+ lzeros = 0;
+ }
+
+ miter.consumed = lzeros;
+ sg_miter_stop(&miter);
+ nbytes -= lzeros;
+
+ return tbytes - nbytes;
+}
+
static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
size_t desclen)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
struct device *dev = ctx->dev;
+ struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
struct rsa_edesc *edesc;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
+ int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
int sgc;
int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
int src_nents, dst_nents;
+ int lzeros;
+
+ lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len, sg_flags);
+ if (lzeros < 0)
+ return ERR_PTR(lzeros);
+
+ req->src_len -= lzeros;
+ req->src = scatterwalk_ffwd(req_ctx->src, req->src, lzeros);
src_nents = sg_nents_for_len(req->src, req->src_len);
dst_nents = sg_nents_for_len(req->dst, req->dst_len);
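rsa_edesc_alloc() now strips leading zero bytes from the input before building the descriptor: the helper above counts them across the scatterlist via an sg_miter, and scatterwalk_ffwd() re-points req->src past them using the new per-request context. The same counting logic on a flat buffer, as a runnable sketch (the sg_miter plumbing is omitted):

#include <stddef.h>
#include <stdio.h>

static size_t count_leading_zeros(const unsigned char *buf, size_t len)
{
	size_t lz = 0;

	while (lz < len && !buf[lz])
		lz++;
	return lz;
}

int main(void)
{
	unsigned char n[] = { 0x00, 0x00, 0x1f, 0x8b };
	size_t lz = count_leading_zeros(n, sizeof(n));

	/* caampkc then does: req->src_len -= lzeros;
	 * req->src = scatterwalk_ffwd(req_ctx->src, req->src, lzeros);
	 */
	printf("skip %zu of %zu input bytes\n", lz, sizeof(n));
	return 0;
}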
@@ -344,7 +397,7 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
int sec4_sg_index = 0;
size_t p_sz = key->p_sz;
- size_t q_sz = key->p_sz;
+ size_t q_sz = key->q_sz;
pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
if (dma_mapping_error(dev, pdb->d_dma)) {
@@ -419,7 +472,7 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
int sec4_sg_index = 0;
size_t p_sz = key->p_sz;
- size_t q_sz = key->p_sz;
+ size_t q_sz = key->q_sz;
pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
if (dma_mapping_error(dev, pdb->p_dma)) {
@@ -730,19 +783,12 @@ static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
*/
static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
{
- u8 *val;
caam_rsa_drop_leading_zeros(&buf, nbytes);
if (!*nbytes)
return NULL;
- val = kzalloc(*nbytes, GFP_DMA | GFP_KERNEL);
- if (!val)
- return NULL;
-
- memcpy(val, buf, *nbytes);
-
- return val;
+ return kmemdup(buf, *nbytes, GFP_DMA | GFP_KERNEL);
}
static int caam_rsa_check_key_length(unsigned int len)
@@ -953,6 +999,7 @@ static struct akcipher_alg caam_rsa = {
.max_size = caam_rsa_max_size,
.init = caam_rsa_init_tfm,
.exit = caam_rsa_exit_tfm,
+ .reqsize = sizeof(struct caam_rsa_req_ctx),
.base = {
.cra_name = "rsa",
.cra_driver_name = "rsa-caam",
diff --git a/drivers/crypto/caam/caampkc.h b/drivers/crypto/caam/caampkc.h
index fd145c46eae1..82645bcf8b27 100644
--- a/drivers/crypto/caam/caampkc.h
+++ b/drivers/crypto/caam/caampkc.h
@@ -96,6 +96,14 @@ struct caam_rsa_ctx {
};
/**
+ * caam_rsa_req_ctx - per request context.
+ * @src: input scatterlist (stripped of leading zeros)
+ */
+struct caam_rsa_req_ctx {
+ struct scatterlist src[2];
+};
+
+/**
* rsa_edesc - s/w-extended rsa descriptor
* @src_nents : number of segments in input scatterlist
* @dst_nents : number of segments in output scatterlist
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index e4cc636e1104..538c01f428c1 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -322,9 +322,9 @@ static int caam_remove(struct platform_device *pdev)
/*
* De-initialize RNG state handles initialized by this driver.
- * In case of DPAA 2.x, RNG is managed by MC firmware.
+ * In case of SoCs with Management Complex, RNG is managed by MC f/w.
*/
- if (!caam_dpaa2 && ctrlpriv->rng4_sh_init)
+ if (!ctrlpriv->mc_en && ctrlpriv->rng4_sh_init)
deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
/* Shut down debug views */
@@ -396,11 +396,56 @@ start_rng:
clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, RTMCTL_SAMP_MODE_RAW_ES_SC);
}
+static int caam_get_era_from_hw(struct caam_ctrl __iomem *ctrl)
+{
+ static const struct {
+ u16 ip_id;
+ u8 maj_rev;
+ u8 era;
+ } id[] = {
+ {0x0A10, 1, 1},
+ {0x0A10, 2, 2},
+ {0x0A12, 1, 3},
+ {0x0A14, 1, 3},
+ {0x0A14, 2, 4},
+ {0x0A16, 1, 4},
+ {0x0A10, 3, 4},
+ {0x0A11, 1, 4},
+ {0x0A18, 1, 4},
+ {0x0A11, 2, 5},
+ {0x0A12, 2, 5},
+ {0x0A13, 1, 5},
+ {0x0A1C, 1, 5}
+ };
+ u32 ccbvid, id_ms;
+ u8 maj_rev, era;
+ u16 ip_id;
+ int i;
+
+ ccbvid = rd_reg32(&ctrl->perfmon.ccb_id);
+ era = (ccbvid & CCBVID_ERA_MASK) >> CCBVID_ERA_SHIFT;
+ if (era) /* This is '0' prior to CAAM ERA-6 */
+ return era;
+
+ id_ms = rd_reg32(&ctrl->perfmon.caam_id_ms);
+ ip_id = (id_ms & SECVID_MS_IPID_MASK) >> SECVID_MS_IPID_SHIFT;
+ maj_rev = (id_ms & SECVID_MS_MAJ_REV_MASK) >> SECVID_MS_MAJ_REV_SHIFT;
+
+ for (i = 0; i < ARRAY_SIZE(id); i++)
+ if (id[i].ip_id == ip_id && id[i].maj_rev == maj_rev)
+ return id[i].era;
+
+ return -ENOTSUPP;
+}
+
/**
* caam_get_era() - Return the ERA of the SEC on SoC, based
- * on "sec-era" propery in the DTS. This property is updated by u-boot.
+ * on "sec-era" optional property in the DTS. This property is updated
+ * by u-boot.
+ * In case this property is not passed, an attempt to retrieve the CAAM
+ * era via register reads will be made.
**/
-int caam_get_era(void)
+static int caam_get_era(struct caam_ctrl __iomem *ctrl)
{
struct device_node *caam_node;
int ret;
@@ -410,9 +455,11 @@ int caam_get_era(void)
ret = of_property_read_u32(caam_node, "fsl,sec-era", &prop);
of_node_put(caam_node);
- return ret ? -ENOTSUPP : prop;
+ if (!ret)
+ return prop;
+ else
+ return caam_get_era_from_hw(ctrl);
}
-EXPORT_SYMBOL(caam_get_era);
static const struct of_device_id caam_match[] = {
{
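When the device tree carries no usable "fsl,sec-era" property, the era is recovered from hardware IDs: CCBVID reports it directly on era 6 and later parts, otherwise the (IP ID, major revision) pair is matched against the table above. A standalone sketch of that fallback, with a few rows copied from the patch:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct era_map { uint16_t ip_id; uint8_t maj_rev; uint8_t era; };

static const struct era_map map[] = {
	{ 0x0A12, 1, 3 }, { 0x0A12, 2, 5 }, { 0x0A1C, 1, 5 },
};

static int era_from_ids(uint8_t ccbvid_era, uint16_t ip_id, uint8_t maj_rev)
{
	size_t i;

	if (ccbvid_era)		/* non-zero only from era 6 onwards */
		return ccbvid_era;
	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (map[i].ip_id == ip_id && map[i].maj_rev == maj_rev)
			return map[i].era;
	return -1;		/* stands in for -ENOTSUPP */
}

int main(void)
{
	printf("IP 0x0A12 rev 2 -> era %d\n", era_from_ids(0, 0x0A12, 2));
	return 0;
}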
@@ -571,11 +618,15 @@ static int caam_probe(struct platform_device *pdev)
/*
* Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
* long pointers in master configuration register.
- * In case of DPAA 2.x, Management Complex firmware performs
+ * In case of SoCs with Management Complex, MC f/w performs
* the configuration.
*/
caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
- if (!caam_dpaa2)
+ np = of_find_compatible_node(NULL, NULL, "fsl,qoriq-mc");
+ ctrlpriv->mc_en = !!np;
+ of_node_put(np);
+
+ if (!ctrlpriv->mc_en)
clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
MCFGR_WDENABLE | MCFGR_LARGE_BURST |
@@ -623,7 +674,7 @@ static int caam_probe(struct platform_device *pdev)
goto iounmap_ctrl;
}
- ctrlpriv->era = caam_get_era();
+ ctrlpriv->era = caam_get_era(ctrl);
ret = of_platform_populate(nprop, caam_match, NULL, dev);
if (ret) {
@@ -686,9 +737,9 @@ static int caam_probe(struct platform_device *pdev)
/*
* If SEC has RNG version >= 4 and RNG state handle has not been
* already instantiated, do RNG instantiation
- * In case of DPAA 2.x, RNG is managed by MC firmware.
+ * In case of SoCs with Management Complex, RNG is managed by MC f/w.
*/
- if (!caam_dpaa2 &&
+ if (!ctrlpriv->mc_en &&
(cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
ctrlpriv->rng4_sh_init =
rd_reg32(&ctrl->r4tst[0].rdsta);
@@ -757,9 +808,8 @@ static int caam_probe(struct platform_device *pdev)
/* Report "alive" for developer to see */
dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
ctrlpriv->era);
- dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n",
- ctrlpriv->total_jobrs, ctrlpriv->qi_present,
- caam_dpaa2 ? "yes" : "no");
+ dev_info(dev, "job rings = %d, qi = %d\n",
+ ctrlpriv->total_jobrs, ctrlpriv->qi_present);
#ifdef CONFIG_DEBUG_FS
debugfs_create_file("rq_dequeued", S_IRUSR | S_IRGRP | S_IROTH,
diff --git a/drivers/crypto/caam/ctrl.h b/drivers/crypto/caam/ctrl.h
index be693a2cc25e..f3ecd67922a7 100644
--- a/drivers/crypto/caam/ctrl.h
+++ b/drivers/crypto/caam/ctrl.h
@@ -9,8 +9,6 @@
#define CTRL_H
/* Prototypes for backend-level services exposed to APIs */
-int caam_get_era(void);
-
extern bool caam_dpaa2;
#endif /* CTRL_H */
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 7696a774a362..babc78abd155 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -82,6 +82,7 @@ struct caam_drv_private {
*/
u8 total_jobrs; /* Total Job Rings in device */
u8 qi_present; /* Nonzero if QI present in device */
+ u8 mc_en; /* Nonzero if MC f/w is active */
int secvio_irq; /* Security violation interrupt number */
int virt_en; /* Virtualization enabled in CAAM */
int era; /* CAAM Era (internal HW revision) */
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index b9480828da38..67f7f8c42c93 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -657,9 +657,8 @@ static int init_cgr(struct device *qidev)
{
int ret;
struct qm_mcc_initcgr opts;
- const u64 cpus = *(u64 *)qman_affine_cpus();
- const int num_cpus = hweight64(cpus);
- const u64 val = num_cpus * MAX_RSP_FQ_BACKLOG_PER_CPU;
+ const u64 val = (u64)cpumask_weight(qman_affine_cpus()) *
+ MAX_RSP_FQ_BACKLOG_PER_CPU;
ret = qman_alloc_cgrid(&qipriv.cgr.cgrid);
if (ret) {
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index fee363865d88..4fb91ba39c36 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -312,11 +312,17 @@ struct caam_perfmon {
/* Component Instantiation Parameters fe0-fff */
u32 rtic_id; /* RVID - RTIC Version ID */
+#define CCBVID_ERA_MASK 0xff000000
+#define CCBVID_ERA_SHIFT 24
u32 ccb_id; /* CCBVID - CCB Version ID */
u32 cha_id_ms; /* CHAVID - CHA Version ID Most Significant*/
u32 cha_id_ls; /* CHAVID - CHA Version ID Least Significant*/
u32 cha_num_ms; /* CHANUM - CHA Number Most Significant */
u32 cha_num_ls; /* CHANUM - CHA Number Least Significant*/
+#define SECVID_MS_IPID_MASK 0xffff0000
+#define SECVID_MS_IPID_SHIFT 16
+#define SECVID_MS_MAJ_REV_MASK 0x0000ff00
+#define SECVID_MS_MAJ_REV_SHIFT 8
u32 caam_id_ms; /* CAAMVID - CAAM Version ID MS */
u32 caam_id_ls; /* CAAMVID - CAAM Version ID LS */
};
diff --git a/drivers/crypto/cavium/zip/common.h b/drivers/crypto/cavium/zip/common.h
index dc451e0a43c5..58fb3ed6e644 100644
--- a/drivers/crypto/cavium/zip/common.h
+++ b/drivers/crypto/cavium/zip/common.h
@@ -46,8 +46,10 @@
#ifndef __COMMON_H__
#define __COMMON_H__
+#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -149,6 +151,25 @@ struct zip_operation {
u32 sizeofzops;
};
+static inline int zip_poll_result(union zip_zres_s *result)
+{
+ int retries = 1000;
+
+ while (!result->s.compcode) {
+ if (!--retries) {
+ pr_err("ZIP ERR: request timed out\n");
+ return -ETIMEDOUT;
+ }
+ udelay(10);
+ /*
+ * Force re-reading of compcode which is updated
+ * by the ZIP coprocessor.
+ */
+ rmb();
+ }
+ return 0;
+}
+
/* error messages */
#define zip_err(fmt, args...) pr_err("ZIP ERR:%s():%d: " \
fmt "\n", __func__, __LINE__, ## args)
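zip_poll_result() bounds what used to be an open-ended busy-wait: at most 1000 polls spaced 10 microseconds apart (roughly 10 ms), with rmb() forcing a fresh read of the completion code the coprocessor updates; the zip_deflate()/zip_inflate() hunks below call it without using the return value. A userspace model of the same poll-with-timeout contract, including how a caller could propagate the timeout:

#include <errno.h>
#include <stdio.h>

static int poll_result(const volatile unsigned int *compcode)
{
	int retries = 1000;

	while (!*compcode) {
		if (!--retries)
			return -ETIMEDOUT;
		/* in the driver: udelay(10); rmb(); */
	}
	return 0;
}

int main(void)
{
	volatile unsigned int compcode = 0;	/* hardware that never completes */
	int ret = poll_result(&compcode);

	if (ret)
		fprintf(stderr, "request timed out (%d)\n", ret);
	return ret ? 1 : 0;
}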
diff --git a/drivers/crypto/cavium/zip/zip_crypto.c b/drivers/crypto/cavium/zip/zip_crypto.c
index 8df4d26cf9d4..b92b6e7e100f 100644
--- a/drivers/crypto/cavium/zip/zip_crypto.c
+++ b/drivers/crypto/cavium/zip/zip_crypto.c
@@ -124,7 +124,7 @@ int zip_compress(const u8 *src, unsigned int slen,
struct zip_kernel_ctx *zip_ctx)
{
struct zip_operation *zip_ops = NULL;
- struct zip_state zip_state;
+ struct zip_state *zip_state;
struct zip_device *zip = NULL;
int ret;
@@ -135,20 +135,23 @@ int zip_compress(const u8 *src, unsigned int slen,
if (!zip)
return -ENODEV;
- memset(&zip_state, 0, sizeof(struct zip_state));
+ zip_state = kzalloc(sizeof(*zip_state), GFP_ATOMIC);
+ if (!zip_state)
+ return -ENOMEM;
+
zip_ops = &zip_ctx->zip_comp;
zip_ops->input_len = slen;
zip_ops->output_len = *dlen;
memcpy(zip_ops->input, src, slen);
- ret = zip_deflate(zip_ops, &zip_state, zip);
+ ret = zip_deflate(zip_ops, zip_state, zip);
if (!ret) {
*dlen = zip_ops->output_len;
memcpy(dst, zip_ops->output, *dlen);
}
-
+ kfree(zip_state);
return ret;
}
@@ -157,7 +160,7 @@ int zip_decompress(const u8 *src, unsigned int slen,
struct zip_kernel_ctx *zip_ctx)
{
struct zip_operation *zip_ops = NULL;
- struct zip_state zip_state;
+ struct zip_state *zip_state;
struct zip_device *zip = NULL;
int ret;
@@ -168,7 +171,10 @@ int zip_decompress(const u8 *src, unsigned int slen,
if (!zip)
return -ENODEV;
- memset(&zip_state, 0, sizeof(struct zip_state));
+ zip_state = kzalloc(sizeof(*zip_state), GFP_ATOMIC);
+ if (!zip_state)
+ return -ENOMEM;
+
zip_ops = &zip_ctx->zip_decomp;
memcpy(zip_ops->input, src, slen);
@@ -179,13 +185,13 @@ int zip_decompress(const u8 *src, unsigned int slen,
zip_ops->input_len = slen;
zip_ops->output_len = *dlen;
- ret = zip_inflate(zip_ops, &zip_state, zip);
+ ret = zip_inflate(zip_ops, zip_state, zip);
if (!ret) {
*dlen = zip_ops->output_len;
memcpy(dst, zip_ops->output, *dlen);
}
-
+ kfree(zip_state);
return ret;
}
diff --git a/drivers/crypto/cavium/zip/zip_deflate.c b/drivers/crypto/cavium/zip/zip_deflate.c
index 9a944b8c1e29..d7133f857d67 100644
--- a/drivers/crypto/cavium/zip/zip_deflate.c
+++ b/drivers/crypto/cavium/zip/zip_deflate.c
@@ -129,8 +129,8 @@ int zip_deflate(struct zip_operation *zip_ops, struct zip_state *s,
/* Stats update for compression requests submitted */
atomic64_inc(&zip_dev->stats.comp_req_submit);
- while (!result_ptr->s.compcode)
- continue;
+ /* Wait for completion or error */
+ zip_poll_result(result_ptr);
/* Stats update for compression requests completed */
atomic64_inc(&zip_dev->stats.comp_req_complete);
diff --git a/drivers/crypto/cavium/zip/zip_device.c b/drivers/crypto/cavium/zip/zip_device.c
index ccf21fb91513..f174ec29ed69 100644
--- a/drivers/crypto/cavium/zip/zip_device.c
+++ b/drivers/crypto/cavium/zip/zip_device.c
@@ -87,12 +87,12 @@ u32 zip_load_instr(union zip_inst_s *instr,
* Distribute the instructions between the enabled queues based on
* the CPU id.
*/
- if (smp_processor_id() % 2 == 0)
+ if (raw_smp_processor_id() % 2 == 0)
queue = 0;
else
queue = 1;
- zip_dbg("CPU Core: %d Queue number:%d", smp_processor_id(), queue);
+ zip_dbg("CPU Core: %d Queue number:%d", raw_smp_processor_id(), queue);
/* Take cmd buffer lock */
spin_lock(&zip_dev->iq[queue].lock);
diff --git a/drivers/crypto/cavium/zip/zip_inflate.c b/drivers/crypto/cavium/zip/zip_inflate.c
index 50cbdd83dbf2..7e0d73e2f89e 100644
--- a/drivers/crypto/cavium/zip/zip_inflate.c
+++ b/drivers/crypto/cavium/zip/zip_inflate.c
@@ -143,8 +143,8 @@ int zip_inflate(struct zip_operation *zip_ops, struct zip_state *s,
/* Decompression requests submitted stats update */
atomic64_inc(&zip_dev->stats.decomp_req_submit);
- while (!result_ptr->s.compcode)
- continue;
+ /* Wait for completion or error */
+ zip_poll_result(result_ptr);
/* Decompression requests completed stats update */
atomic64_inc(&zip_dev->stats.decomp_req_complete);
diff --git a/drivers/crypto/cavium/zip/zip_main.c b/drivers/crypto/cavium/zip/zip_main.c
index 1cd8aa488185..be055b9547f6 100644
--- a/drivers/crypto/cavium/zip/zip_main.c
+++ b/drivers/crypto/cavium/zip/zip_main.c
@@ -113,7 +113,7 @@ struct zip_device *zip_get_device(int node)
*/
int zip_get_node_id(void)
{
- return cpu_to_node(smp_processor_id());
+ return cpu_to_node(raw_smp_processor_id());
}
/* Initializes the ZIP h/w sub-system */
@@ -469,6 +469,8 @@ static int zip_show_stats(struct seq_file *s, void *unused)
struct zip_stats *st;
for (index = 0; index < MAX_ZIP_DEVICES; index++) {
+ u64 pending = 0;
+
if (zip_dev[index]) {
zip = zip_dev[index];
st = &zip->stats;
@@ -476,16 +478,15 @@ static int zip_show_stats(struct seq_file *s, void *unused)
/* Get all the pending requests */
for (q = 0; q < ZIP_NUM_QUEUES; q++) {
val = zip_reg_read((zip->reg_base +
- ZIP_DBG_COREX_STA(q)));
- val = (val >> 32);
- val = val & 0xffffff;
- atomic64_add(val, &st->pending_req);
+ ZIP_DBG_QUEX_STA(q)));
+ pending += (val >> 32) & 0xffffff;
}
- avg_chunk = (atomic64_read(&st->comp_in_bytes) /
- atomic64_read(&st->comp_req_complete));
- avg_cr = (atomic64_read(&st->comp_in_bytes) /
- atomic64_read(&st->comp_out_bytes));
+ val = atomic64_read(&st->comp_req_complete);
+ avg_chunk = (val) ? atomic64_read(&st->comp_in_bytes) / val : 0;
+
+ val = atomic64_read(&st->comp_out_bytes);
+ avg_cr = (val) ? atomic64_read(&st->comp_in_bytes) / val : 0;
seq_printf(s, " ZIP Device %d Stats\n"
"-----------------------------------\n"
"Comp Req Submitted : \t%lld\n"
@@ -513,10 +514,7 @@ static int zip_show_stats(struct seq_file *s, void *unused)
(u64)atomic64_read(&st->decomp_in_bytes),
(u64)atomic64_read(&st->decomp_out_bytes),
(u64)atomic64_read(&st->decomp_bad_reqs),
- (u64)atomic64_read(&st->pending_req));
-
- /* Reset pending requests count */
- atomic64_set(&st->pending_req, 0);
+ pending);
}
}
return 0;
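The statistics path now reads the pending count per queue from ZIP_DBG_QUEX_STA into a local accumulator instead of an atomic that was reset afterwards, and both averages are guarded against a zero denominator rather than dividing unconditionally. The guarded-average idiom as a tiny standalone example:

#include <stdint.h>
#include <stdio.h>

static uint64_t safe_avg(uint64_t total, uint64_t count)
{
	/* mirrors: avg = (val) ? in_bytes / val : 0; */
	return count ? total / count : 0;
}

int main(void)
{
	printf("avg chunk with no completions: %llu\n",
	       (unsigned long long)safe_avg(4096, 0));
	printf("avg chunk over 256 requests: %llu\n",
	       (unsigned long long)safe_avg(1 << 20, 256));
	return 0;
}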
diff --git a/drivers/crypto/cavium/zip/zip_main.h b/drivers/crypto/cavium/zip/zip_main.h
index 64e051f60784..e1e4fa92ce80 100644
--- a/drivers/crypto/cavium/zip/zip_main.h
+++ b/drivers/crypto/cavium/zip/zip_main.h
@@ -74,7 +74,6 @@ struct zip_stats {
atomic64_t comp_req_complete;
atomic64_t decomp_req_submit;
atomic64_t decomp_req_complete;
- atomic64_t pending_req;
atomic64_t comp_in_bytes;
atomic64_t comp_out_bytes;
atomic64_t decomp_in_bytes;
diff --git a/drivers/crypto/cavium/zip/zip_regs.h b/drivers/crypto/cavium/zip/zip_regs.h
index d0be682305c1..874e0236c87e 100644
--- a/drivers/crypto/cavium/zip/zip_regs.h
+++ b/drivers/crypto/cavium/zip/zip_regs.h
@@ -443,7 +443,7 @@ union zip_corex_bist_status {
static inline u64 ZIP_COREX_BIST_STATUS(u64 param1)
{
- if (((param1 <= 1)))
+ if (param1 <= 1)
return 0x0520ull + (param1 & 1) * 0x8ull;
pr_err("ZIP_COREX_BIST_STATUS: %llu\n", param1);
return 0;
@@ -537,7 +537,7 @@ union zip_dbg_corex_inst {
static inline u64 ZIP_DBG_COREX_INST(u64 param1)
{
- if (((param1 <= 1)))
+ if (param1 <= 1)
return 0x0640ull + (param1 & 1) * 0x8ull;
pr_err("ZIP_DBG_COREX_INST: %llu\n", param1);
return 0;
@@ -568,7 +568,7 @@ union zip_dbg_corex_sta {
static inline u64 ZIP_DBG_COREX_STA(u64 param1)
{
- if (((param1 <= 1)))
+ if (param1 <= 1)
return 0x0680ull + (param1 & 1) * 0x8ull;
pr_err("ZIP_DBG_COREX_STA: %llu\n", param1);
return 0;
@@ -599,7 +599,7 @@ union zip_dbg_quex_sta {
static inline u64 ZIP_DBG_QUEX_STA(u64 param1)
{
- if (((param1 <= 7)))
+ if (param1 <= 7)
return 0x1800ull + (param1 & 7) * 0x8ull;
pr_err("ZIP_DBG_QUEX_STA: %llu\n", param1);
return 0;
@@ -817,7 +817,7 @@ union zip_msix_pbax {
static inline u64 ZIP_MSIX_PBAX(u64 param1)
{
- if (((param1 == 0)))
+ if (param1 == 0)
return 0x0000838000FF0000ull;
pr_err("ZIP_MSIX_PBAX: %llu\n", param1);
return 0;
@@ -846,7 +846,7 @@ union zip_msix_vecx_addr {
static inline u64 ZIP_MSIX_VECX_ADDR(u64 param1)
{
- if (((param1 <= 17)))
+ if (param1 <= 17)
return 0x0000838000F00000ull + (param1 & 31) * 0x10ull;
pr_err("ZIP_MSIX_VECX_ADDR: %llu\n", param1);
return 0;
@@ -875,7 +875,7 @@ union zip_msix_vecx_ctl {
static inline u64 ZIP_MSIX_VECX_CTL(u64 param1)
{
- if (((param1 <= 17)))
+ if (param1 <= 17)
return 0x0000838000F00008ull + (param1 & 31) * 0x10ull;
pr_err("ZIP_MSIX_VECX_CTL: %llu\n", param1);
return 0;
@@ -900,7 +900,7 @@ union zip_quex_done {
static inline u64 ZIP_QUEX_DONE(u64 param1)
{
- if (((param1 <= 7)))
+ if (param1 <= 7)
return 0x2000ull + (param1 & 7) * 0x8ull;
pr_err("ZIP_QUEX_DONE: %llu\n", param1);
return 0;
@@ -925,7 +925,7 @@ union zip_quex_done_ack {
static inline u64 ZIP_QUEX_DONE_ACK(u64 param1)
{
- if (((param1 <= 7)))
+ if (param1 <= 7)
return 0x2200ull + (param1 & 7) * 0x8ull;
pr_err("ZIP_QUEX_DONE_ACK: %llu\n", param1);
return 0;
@@ -950,7 +950,7 @@ union zip_quex_done_ena_w1c {
static inline u64 ZIP_QUEX_DONE_ENA_W1C(u64 param1)
{
- if (((param1 <= 7)))
+ if (param1 <= 7)
return 0x2600ull + (param1 & 7) * 0x8ull;
pr_err("ZIP_QUEX_DONE_ENA_W1C: %llu\n", param1);
return 0;
@@ -975,7 +975,7 @@ union zip_quex_done_ena_w1s {
static inline u64 ZIP_QUEX_DONE_ENA_W1S(u64 param1)
{
- if (((param1 <= 7)))
+ if (param1 <= 7)
return 0x2400ull + (param1 & 7) * 0x8ull;
pr_err("ZIP_QUEX_DONE_ENA_W1S: %llu\n", param1);
return 0;
@@ -1004,7 +1004,7 @@ union zip_quex_done_wait {
static inline u64 ZIP_QUEX_DONE_WAIT(u64 param1)
{
- if (((param1 <= 7)))
+ if (param1 <= 7)
return 0x2800ull + (param1 & 7) * 0x8ull;
pr_err("ZIP_QUEX_DONE_WAIT: %llu\n", param1);
return 0;
@@ -1029,7 +1029,7 @@ union zip_quex_doorbell {
static inline u64 ZIP_QUEX_DOORBELL(u64 param1)
{
- if (((param1 <= 7)))
+ if (param1 <= 7)
return 0x4000ull + (param1 & 7) * 0x8ull;
pr_err("ZIP_QUEX_DOORBELL: %llu\n", param1);
return 0;
@@ -1058,7 +1058,7 @@ union zip_quex_err_ena_w1c {
static inline u64 ZIP_QUEX_ERR_ENA_W1C(u64 param1)
{
- if (((param1 <= 7)))
+ if (param1 <= 7)
return 0x3600ull + (param1 & 7) * 0x8ull;
pr_err("ZIP_QUEX_ERR_ENA_W1C: %llu\n", param1);
return 0;
@@ -1087,7 +1087,7 @@ union zip_quex_err_ena_w1s {
static inline u64 ZIP_QUEX_ERR_ENA_W1S(u64 param1)
{
- if (((param1 <= 7)))
+ if (param1 <= 7)
return 0x3400ull + (param1 & 7) * 0x8ull;
pr_err("ZIP_QUEX_ERR_ENA_W1S: %llu\n", param1);
return 0;
@@ -1120,7 +1120,7 @@ union zip_quex_err_int {
static inline u64 ZIP_QUEX_ERR_INT(u64 param1)
{
- if (((param1 <= 7)))
+ if (param1 <= 7)
return 0x3000ull + (param1 & 7) * 0x8ull;
pr_err("ZIP_QUEX_ERR_INT: %llu\n", param1);
return 0;
@@ -1150,7 +1150,7 @@ union zip_quex_err_int_w1s {
static inline u64 ZIP_QUEX_ERR_INT_W1S(u64 param1)
{
- if (((param1 <= 7)))
+ if (param1 <= 7)
return 0x3200ull + (param1 & 7) * 0x8ull;
pr_err("ZIP_QUEX_ERR_INT_W1S: %llu\n", param1);
return 0;
@@ -1179,7 +1179,7 @@ union zip_quex_gcfg {
static inline u64 ZIP_QUEX_GCFG(u64 param1)
{
- if (((param1 <= 7)))
+ if (param1 <= 7)
return 0x1A00ull + (param1 & 7) * 0x8ull;
pr_err("ZIP_QUEX_GCFG: %llu\n", param1);
return 0;
@@ -1204,7 +1204,7 @@ union zip_quex_map {
static inline u64 ZIP_QUEX_MAP(u64 param1)
{
- if (((param1 <= 7)))
+ if (param1 <= 7)
return 0x1400ull + (param1 & 7) * 0x8ull;
pr_err("ZIP_QUEX_MAP: %llu\n", param1);
return 0;
@@ -1236,7 +1236,7 @@ union zip_quex_sbuf_addr {
static inline u64 ZIP_QUEX_SBUF_ADDR(u64 param1)
{
- if (((param1 <= 7)))
+ if (param1 <= 7)
return 0x1000ull + (param1 & 7) * 0x8ull;
pr_err("ZIP_QUEX_SBUF_ADDR: %llu\n", param1);
return 0;
@@ -1276,7 +1276,7 @@ union zip_quex_sbuf_ctl {
static inline u64 ZIP_QUEX_SBUF_CTL(u64 param1)
{
- if (((param1 <= 7)))
+ if (param1 <= 7)
return 0x1200ull + (param1 & 7) * 0x8ull;
pr_err("ZIP_QUEX_SBUF_CTL: %llu\n", param1);
return 0;
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index d95ec526587a..ff478d826d7d 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -22,11 +22,17 @@
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/ccp.h>
+#include <linux/firmware.h>
#include "sp-dev.h"
#include "psp-dev.h"
+#define SEV_VERSION_GREATER_OR_EQUAL(_maj, _min) \
+ ((psp_master->api_major) >= _maj && \
+ (psp_master->api_minor) >= _min)
+
#define DEVICE_NAME "sev"
+#define SEV_FW_FILE "amd/sev.fw"
static DEFINE_MUTEX(sev_cmd_mutex);
static struct sev_misc_dev *misc_dev;
@@ -112,6 +118,8 @@ static int sev_cmd_buffer_len(int cmd)
case SEV_CMD_RECEIVE_UPDATE_DATA: return sizeof(struct sev_data_receive_update_data);
case SEV_CMD_RECEIVE_UPDATE_VMSA: return sizeof(struct sev_data_receive_update_vmsa);
case SEV_CMD_LAUNCH_UPDATE_SECRET: return sizeof(struct sev_data_launch_secret);
+ case SEV_CMD_DOWNLOAD_FIRMWARE: return sizeof(struct sev_data_download_firmware);
+ case SEV_CMD_GET_ID: return sizeof(struct sev_data_get_id);
default: return 0;
}
@@ -378,6 +386,79 @@ void *psp_copy_user_blob(u64 __user uaddr, u32 len)
}
EXPORT_SYMBOL_GPL(psp_copy_user_blob);
+static int sev_get_api_version(void)
+{
+ struct sev_user_data_status *status;
+ int error, ret;
+
+ status = &psp_master->status_cmd_buf;
+ ret = sev_platform_status(status, &error);
+ if (ret) {
+ dev_err(psp_master->dev,
+ "SEV: failed to get status. Error: %#x\n", error);
+ return 1;
+ }
+
+ psp_master->api_major = status->api_major;
+ psp_master->api_minor = status->api_minor;
+ psp_master->build = status->build;
+
+ return 0;
+}
+
+/* Don't fail if SEV FW couldn't be updated. Continue with existing SEV FW */
+static int sev_update_firmware(struct device *dev)
+{
+ struct sev_data_download_firmware *data;
+ const struct firmware *firmware;
+ int ret, error, order;
+ struct page *p;
+ u64 data_size;
+
+ ret = request_firmware(&firmware, SEV_FW_FILE, dev);
+ if (ret < 0)
+ return -1;
+
+ /*
+ * SEV FW expects the physical address given to it to be
+ * 32-byte aligned. The allocated memory has the data structure
+ * placed at the beginning, followed by the firmware image being
+ * passed to the SEV FW. Allocate enough memory for the data
+ * structure + alignment padding + SEV FW.
+ */
+ data_size = ALIGN(sizeof(struct sev_data_download_firmware), 32);
+
+ order = get_order(firmware->size + data_size);
+ p = alloc_pages(GFP_KERNEL, order);
+ if (!p) {
+ ret = -1;
+ goto fw_err;
+ }
+
+ /*
+ * Copy firmware data to a kernel allocated contiguous
+ * memory region.
+ */
+ data = page_address(p);
+ memcpy(page_address(p) + data_size, firmware->data, firmware->size);
+
+ data->address = __psp_pa(page_address(p) + data_size);
+ data->len = firmware->size;
+
+ ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error);
+ if (ret)
+ dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error);
+ else
+ dev_info(dev, "SEV firmware update successful\n");
+
+ __free_pages(p, order);
+
+fw_err:
+ release_firmware(firmware);
+
+ return ret;
+}
+
static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp)
{
struct sev_user_data_pek_cert_import input;
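The comment in sev_update_firmware() spells out the DOWNLOAD_FIRMWARE buffer layout: a single page-order allocation holding the command descriptor, padding up to the next 32-byte boundary, then the firmware image that data->address points at. The same arithmetic as a runnable sketch (the descriptor size here is a stand-in):

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t desc_size = 12;	/* stand-in for sizeof(struct sev_data_download_firmware) */
	uint64_t fw_size = 0x2000;	/* firmware->size */
	uint64_t data_size = ALIGN_UP(desc_size, 32);	/* ALIGN(..., 32) in the patch */

	/* Layout: [descriptor][pad to 32B][firmware image] */
	printf("descriptor at 0, image at %llu, total %llu bytes\n",
	       (unsigned long long)data_size,
	       (unsigned long long)(data_size + fw_size));
	return 0;
}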
@@ -430,6 +511,46 @@ e_free:
return ret;
}
+static int sev_ioctl_do_get_id(struct sev_issue_cmd *argp)
+{
+ struct sev_data_get_id *data;
+ u64 data_size, user_size;
+ void *id_blob, *mem;
+ int ret;
+
+ /* SEV GET_ID available from SEV API v0.16 and up */
+ if (!SEV_VERSION_GREATER_OR_EQUAL(0, 16))
+ return -ENOTSUPP;
+
+ /* SEV FW expects the buffer it fills with the ID to be
+ * 8-byte aligned. Memory allocated should be enough to
+ * hold data structure + alignment padding + memory
+ * where SEV FW writes the ID.
+ */
+ data_size = ALIGN(sizeof(struct sev_data_get_id), 8);
+ user_size = sizeof(struct sev_user_data_get_id);
+
+ mem = kzalloc(data_size + user_size, GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
+
+ data = mem;
+ id_blob = mem + data_size;
+
+ data->address = __psp_pa(id_blob);
+ data->len = user_size;
+
+ ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error);
+ if (!ret) {
+ if (copy_to_user((void __user *)argp->data, id_blob, data->len))
+ ret = -EFAULT;
+ }
+
+ kfree(mem);
+
+ return ret;
+}
+
static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp)
{
struct sev_user_data_pdh_cert_export input;
@@ -567,6 +688,9 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
case SEV_PDH_CERT_EXPORT:
ret = sev_ioctl_do_pdh_export(&input);
break;
+ case SEV_GET_ID:
+ ret = sev_ioctl_do_get_id(&input);
+ break;
default:
ret = -EINVAL;
goto out;
@@ -750,7 +874,6 @@ EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user);
void psp_pci_init(void)
{
- struct sev_user_data_status *status;
struct sp_device *sp;
int error, rc;
@@ -760,6 +883,13 @@ void psp_pci_init(void)
psp_master = sp->psp_data;
+ if (sev_get_api_version())
+ goto err;
+
+ if (SEV_VERSION_GREATER_OR_EQUAL(0, 15) &&
+ sev_update_firmware(psp_master->dev) == 0)
+ sev_get_api_version();
+
/* Initialize the platform */
rc = sev_platform_init(&error);
if (rc) {
@@ -767,16 +897,9 @@ void psp_pci_init(void)
goto err;
}
- /* Display SEV firmware version */
- status = &psp_master->status_cmd_buf;
- rc = sev_platform_status(status, &error);
- if (rc) {
- dev_err(sp->dev, "SEV: failed to get status error %#x\n", error);
- goto err;
- }
+ dev_info(sp->dev, "SEV API:%d.%d build:%d\n", psp_master->api_major,
+ psp_master->api_minor, psp_master->build);
- dev_info(sp->dev, "SEV API:%d.%d build:%d\n", status->api_major,
- status->api_minor, status->build);
return;
err:
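psp_pci_init() now queries the SEV API version first, attempts a firmware update only when SEV_VERSION_GREATER_OR_EQUAL(0, 15) holds, and re-reads the version on success before calling sev_platform_init(). Note that, as written, the macro requires the major and the minor field to meet their thresholds independently; a small standalone model of that gate:

#include <stdio.h>

/* Userspace model of SEV_VERSION_GREATER_OR_EQUAL(maj, min) as defined in
 * this patch: both fields are compared against their minimum separately.
 */
static int ver_ge(unsigned int api_major, unsigned int api_minor,
		  unsigned int maj, unsigned int min)
{
	return api_major >= maj && api_minor >= min;
}

int main(void)
{
	/* the firmware update path is taken only for API >= 0.15 */
	printf("API 0.16 passes: %d, API 0.14 passes: %d\n",
	       ver_ge(0, 16, 0, 15), ver_ge(0, 14, 0, 15));
	return 0;
}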
diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h
index c81f0b11287a..c7e9098a233c 100644
--- a/drivers/crypto/ccp/psp-dev.h
+++ b/drivers/crypto/ccp/psp-dev.h
@@ -78,6 +78,10 @@ struct psp_device {
struct sev_misc_dev *sev_misc;
struct sev_user_data_status status_cmd_buf;
struct sev_data_init init_cmd_buf;
+
+ u8 api_major;
+ u8 api_minor;
+ u8 build;
};
#endif /* __PSP_DEV_H */
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index df98f7afe645..d2810c183b73 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -42,6 +42,7 @@ struct cc_cipher_ctx {
int cipher_mode;
int flow_mode;
unsigned int flags;
+ bool hw_key;
struct cc_user_key_info user;
struct cc_hw_key_info hw;
struct crypto_shash *shash_tfm;
@@ -49,6 +50,13 @@ struct cc_cipher_ctx {
static void cc_cipher_complete(struct device *dev, void *cc_req, int err);
+static inline bool cc_is_hw_key(struct crypto_tfm *tfm)
+{
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+
+ return ctx_p->hw_key;
+}
+
static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 size)
{
switch (ctx_p->flow_mode) {
@@ -211,7 +219,7 @@ struct tdes_keys {
u8 key3[DES_KEY_SIZE];
};
-static enum cc_hw_crypto_key hw_key_to_cc_hw_key(int slot_num)
+static enum cc_hw_crypto_key cc_slot_to_hw_key(int slot_num)
{
switch (slot_num) {
case 0:
@@ -226,69 +234,100 @@ static enum cc_hw_crypto_key hw_key_to_cc_hw_key(int slot_num)
return END_OF_KEYS;
}
-static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
- unsigned int keylen)
+static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key,
+ unsigned int keylen)
{
struct crypto_tfm *tfm = crypto_skcipher_tfm(sktfm);
struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx_p->drvdata);
- u32 tmp[DES3_EDE_EXPKEY_WORDS];
- struct cc_crypto_alg *cc_alg =
- container_of(tfm->__crt_alg, struct cc_crypto_alg,
- skcipher_alg.base);
- unsigned int max_key_buf_size = cc_alg->skcipher_alg.max_keysize;
+ struct cc_hkey_info hki;
- dev_dbg(dev, "Setting key in context @%p for %s. keylen=%u\n",
+ dev_dbg(dev, "Setting HW key in context @%p for %s. keylen=%u\n",
ctx_p, crypto_tfm_alg_name(tfm), keylen);
dump_byte_array("key", (u8 *)key, keylen);
/* STAT_PHASE_0: Init and sanity checks */
+ /* This checks the size of the hardware key token */
+ if (keylen != sizeof(hki)) {
+ dev_err(dev, "Unsupported HW key size %d.\n", keylen);
+ crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ if (ctx_p->flow_mode != S_DIN_to_AES) {
+ dev_err(dev, "HW key not supported for non-AES flows\n");
+ return -EINVAL;
+ }
+
+ memcpy(&hki, key, keylen);
+
+ /* The real key len for crypto op is the size of the HW key
+ * referenced by the HW key slot, not the hardware key token
+ */
+ keylen = hki.keylen;
+
if (validate_keys_sizes(ctx_p, keylen)) {
dev_err(dev, "Unsupported key size %d.\n", keylen);
crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
- if (cc_is_hw_key(tfm)) {
- /* setting HW key slots */
- struct arm_hw_key_info *hki = (struct arm_hw_key_info *)key;
+ ctx_p->hw.key1_slot = cc_slot_to_hw_key(hki.hw_key1);
+ if (ctx_p->hw.key1_slot == END_OF_KEYS) {
+ dev_err(dev, "Unsupported hw key1 number (%d)\n", hki.hw_key1);
+ return -EINVAL;
+ }
- if (ctx_p->flow_mode != S_DIN_to_AES) {
- dev_err(dev, "HW key not supported for non-AES flows\n");
+ if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
+ ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
+ ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER) {
+ if (hki.hw_key1 == hki.hw_key2) {
+ dev_err(dev, "Illegal hw key numbers (%d,%d)\n",
+ hki.hw_key1, hki.hw_key2);
return -EINVAL;
}
-
- ctx_p->hw.key1_slot = hw_key_to_cc_hw_key(hki->hw_key1);
- if (ctx_p->hw.key1_slot == END_OF_KEYS) {
- dev_err(dev, "Unsupported hw key1 number (%d)\n",
- hki->hw_key1);
+ ctx_p->hw.key2_slot = cc_slot_to_hw_key(hki.hw_key2);
+ if (ctx_p->hw.key2_slot == END_OF_KEYS) {
+ dev_err(dev, "Unsupported hw key2 number (%d)\n",
+ hki.hw_key2);
return -EINVAL;
}
+ }
- if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
- ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
- ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER) {
- if (hki->hw_key1 == hki->hw_key2) {
- dev_err(dev, "Illegal hw key numbers (%d,%d)\n",
- hki->hw_key1, hki->hw_key2);
- return -EINVAL;
- }
- ctx_p->hw.key2_slot =
- hw_key_to_cc_hw_key(hki->hw_key2);
- if (ctx_p->hw.key2_slot == END_OF_KEYS) {
- dev_err(dev, "Unsupported hw key2 number (%d)\n",
- hki->hw_key2);
- return -EINVAL;
- }
- }
+ ctx_p->keylen = keylen;
+ ctx_p->hw_key = true;
+ dev_dbg(dev, "cc_is_hw_key ret 0");
+
+ return 0;
+}
+
+static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(sktfm);
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+ u32 tmp[DES3_EDE_EXPKEY_WORDS];
+ struct cc_crypto_alg *cc_alg =
+ container_of(tfm->__crt_alg, struct cc_crypto_alg,
+ skcipher_alg.base);
+ unsigned int max_key_buf_size = cc_alg->skcipher_alg.max_keysize;
+
+ dev_dbg(dev, "Setting key in context @%p for %s. keylen=%u\n",
+ ctx_p, crypto_tfm_alg_name(tfm), keylen);
+ dump_byte_array("key", (u8 *)key, keylen);
- ctx_p->keylen = keylen;
- dev_dbg(dev, "cc_is_hw_key ret 0");
+ /* STAT_PHASE_0: Init and sanity checks */
- return 0;
+ if (validate_keys_sizes(ctx_p, keylen)) {
+ dev_err(dev, "Unsupported key size %d.\n", keylen);
+ crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
}
+ ctx_p->hw_key = false;
+
/*
* Verify DES weak keys
* Note that we're dropping the expanded key since the
@@ -735,6 +774,241 @@ static int cc_cipher_decrypt(struct skcipher_request *req)
/* Block cipher alg */
static const struct cc_alg_template skcipher_algs[] = {
{
+ .name = "xts(paes)",
+ .driver_name = "xts-paes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_sethkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = CC_HW_KEY_SIZE,
+ .max_keysize = CC_HW_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_XTS,
+ .flow_mode = S_DIN_to_AES,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "xts512(paes)",
+ .driver_name = "xts-paes-du512-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_sethkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = CC_HW_KEY_SIZE,
+ .max_keysize = CC_HW_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_XTS,
+ .flow_mode = S_DIN_to_AES,
+ .data_unit = 512,
+ .min_hw_rev = CC_HW_REV_712,
+ },
+ {
+ .name = "xts4096(paes)",
+ .driver_name = "xts-paes-du4096-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_sethkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = CC_HW_KEY_SIZE,
+ .max_keysize = CC_HW_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_XTS,
+ .flow_mode = S_DIN_to_AES,
+ .data_unit = 4096,
+ .min_hw_rev = CC_HW_REV_712,
+ },
+ {
+ .name = "essiv(paes)",
+ .driver_name = "essiv-paes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_sethkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = CC_HW_KEY_SIZE,
+ .max_keysize = CC_HW_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_ESSIV,
+ .flow_mode = S_DIN_to_AES,
+ .min_hw_rev = CC_HW_REV_712,
+ },
+ {
+ .name = "essiv512(paes)",
+ .driver_name = "essiv-paes-du512-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_sethkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = CC_HW_KEY_SIZE,
+ .max_keysize = CC_HW_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_ESSIV,
+ .flow_mode = S_DIN_to_AES,
+ .data_unit = 512,
+ .min_hw_rev = CC_HW_REV_712,
+ },
+ {
+ .name = "essiv4096(paes)",
+ .driver_name = "essiv-paes-du4096-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_sethkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = CC_HW_KEY_SIZE,
+ .max_keysize = CC_HW_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_ESSIV,
+ .flow_mode = S_DIN_to_AES,
+ .data_unit = 4096,
+ .min_hw_rev = CC_HW_REV_712,
+ },
+ {
+ .name = "bitlocker(paes)",
+ .driver_name = "bitlocker-paes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_sethkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = CC_HW_KEY_SIZE,
+ .max_keysize = CC_HW_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_BITLOCKER,
+ .flow_mode = S_DIN_to_AES,
+ .min_hw_rev = CC_HW_REV_712,
+ },
+ {
+ .name = "bitlocker512(paes)",
+ .driver_name = "bitlocker-paes-du512-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_sethkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = CC_HW_KEY_SIZE,
+ .max_keysize = CC_HW_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_BITLOCKER,
+ .flow_mode = S_DIN_to_AES,
+ .data_unit = 512,
+ .min_hw_rev = CC_HW_REV_712,
+ },
+ {
+ .name = "bitlocker4096(paes)",
+ .driver_name = "bitlocker-paes-du4096-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_sethkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = CC_HW_KEY_SIZE,
+ .max_keysize = CC_HW_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_BITLOCKER,
+ .flow_mode = S_DIN_to_AES,
+ .data_unit = 4096,
+ .min_hw_rev = CC_HW_REV_712,
+ },
+ {
+ .name = "ecb(paes)",
+ .driver_name = "ecb-paes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_skcipher = {
+ .setkey = cc_cipher_sethkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = CC_HW_KEY_SIZE,
+ .max_keysize = CC_HW_KEY_SIZE,
+ .ivsize = 0,
+ },
+ .cipher_mode = DRV_CIPHER_ECB,
+ .flow_mode = S_DIN_to_AES,
+ .min_hw_rev = CC_HW_REV_712,
+ },
+ {
+ .name = "cbc(paes)",
+ .driver_name = "cbc-paes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_skcipher = {
+ .setkey = cc_cipher_sethkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = CC_HW_KEY_SIZE,
+ .max_keysize = CC_HW_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC,
+ .flow_mode = S_DIN_to_AES,
+ .min_hw_rev = CC_HW_REV_712,
+ },
+ {
+ .name = "ofb(paes)",
+ .driver_name = "ofb-paes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_skcipher = {
+ .setkey = cc_cipher_sethkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = CC_HW_KEY_SIZE,
+ .max_keysize = CC_HW_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_OFB,
+ .flow_mode = S_DIN_to_AES,
+ .min_hw_rev = CC_HW_REV_712,
+ },
+ {
+ .name = "cts1(cbc(paes))",
+ .driver_name = "cts1-cbc-paes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_skcipher = {
+ .setkey = cc_cipher_sethkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = CC_HW_KEY_SIZE,
+ .max_keysize = CC_HW_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC_CTS,
+ .flow_mode = S_DIN_to_AES,
+ .min_hw_rev = CC_HW_REV_712,
+ },
+ {
+ .name = "ctr(paes)",
+ .driver_name = "ctr-paes-ccree",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_skcipher = {
+ .setkey = cc_cipher_sethkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = CC_HW_KEY_SIZE,
+ .max_keysize = CC_HW_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CTR,
+ .flow_mode = S_DIN_to_AES,
+ .min_hw_rev = CC_HW_REV_712,
+ },
+ {
.name = "xts(aes)",
.driver_name = "xts-aes-ccree",
.blocksize = AES_BLOCK_SIZE,
diff --git a/drivers/crypto/ccree/cc_cipher.h b/drivers/crypto/ccree/cc_cipher.h
index 2a2a6f46c515..68444cfa936b 100644
--- a/drivers/crypto/ccree/cc_cipher.h
+++ b/drivers/crypto/ccree/cc_cipher.h
@@ -13,18 +13,6 @@
#include "cc_driver.h"
#include "cc_buffer_mgr.h"
-/* Crypto cipher flags */
-#define CC_CRYPTO_CIPHER_KEY_KFDE0 BIT(0)
-#define CC_CRYPTO_CIPHER_KEY_KFDE1 BIT(1)
-#define CC_CRYPTO_CIPHER_KEY_KFDE2 BIT(2)
-#define CC_CRYPTO_CIPHER_KEY_KFDE3 BIT(3)
-#define CC_CRYPTO_CIPHER_DU_SIZE_512B BIT(4)
-
-#define CC_CRYPTO_CIPHER_KEY_KFDE_MASK (CC_CRYPTO_CIPHER_KEY_KFDE0 | \
- CC_CRYPTO_CIPHER_KEY_KFDE1 | \
- CC_CRYPTO_CIPHER_KEY_KFDE2 | \
- CC_CRYPTO_CIPHER_KEY_KFDE3)
-
struct cipher_req_ctx {
struct async_gen_req_ctx gen_ctx;
enum cc_req_dma_buf_type dma_buf_type;
@@ -42,18 +30,12 @@ int cc_cipher_alloc(struct cc_drvdata *drvdata);
int cc_cipher_free(struct cc_drvdata *drvdata);
-struct arm_hw_key_info {
- int hw_key1;
- int hw_key2;
-};
+struct cc_hkey_info {
+ u16 keylen;
+ u8 hw_key1;
+ u8 hw_key2;
+} __packed;
-/*
- * This is a stub function that will replaced when we
- * implement secure keys
- */
-static inline bool cc_is_hw_key(struct crypto_tfm *tfm)
-{
- return false;
-}
+#define CC_HW_KEY_SIZE sizeof(struct cc_hkey_info)
#endif /*__CC_CIPHER_H__*/
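For context, a minimal sketch (not part of the patch) of how a caller might hand the new packed hardware-key token to one of the paes algorithms; the slot indices, key length and helper name below are assumptions.

/* Hypothetical caller: build the CC_HW_KEY_SIZE token that
 * cc_cipher_sethkey() parses in place of raw key material. */
static int example_set_protected_key(struct crypto_skcipher *tfm)
{
	struct cc_hkey_info hki = {
		.keylen  = AES_KEYSIZE_256,	/* assumed size of the key held in the HW slot */
		.hw_key1 = 0,			/* hypothetical first slot index */
		.hw_key2 = 1,			/* hypothetical second slot (e.g. for XTS) */
	};

	return crypto_skcipher_setkey(tfm, (const u8 *)&hki, sizeof(hki));
}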
diff --git a/drivers/crypto/ccree/cc_debugfs.c b/drivers/crypto/ccree/cc_debugfs.c
index 08f8db489cf0..5ca184e42483 100644
--- a/drivers/crypto/ccree/cc_debugfs.c
+++ b/drivers/crypto/ccree/cc_debugfs.c
@@ -26,7 +26,8 @@ struct cc_debugfs_ctx {
static struct dentry *cc_debugfs_dir;
static struct debugfs_reg32 debug_regs[] = {
- CC_DEBUG_REG(HOST_SIGNATURE),
+ { .name = "SIGNATURE" }, /* Must be 0th */
+ { .name = "VERSION" }, /* Must be 1st */
CC_DEBUG_REG(HOST_IRR),
CC_DEBUG_REG(HOST_POWER_DOWN_EN),
CC_DEBUG_REG(AXIM_MON_ERR),
@@ -34,7 +35,6 @@ static struct debugfs_reg32 debug_regs[] = {
CC_DEBUG_REG(HOST_IMR),
CC_DEBUG_REG(AXIM_CFG),
CC_DEBUG_REG(AXIM_CACHE_PARAMS),
- CC_DEBUG_REG(HOST_VERSION),
CC_DEBUG_REG(GPR_HOST),
CC_DEBUG_REG(AXIM_MON_COMP),
};
@@ -58,6 +58,9 @@ int cc_debugfs_init(struct cc_drvdata *drvdata)
struct debugfs_regset32 *regset;
struct dentry *file;
+ debug_regs[0].offset = drvdata->sig_offset;
+ debug_regs[1].offset = drvdata->ver_offset;
+
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index 89ce013ae093..bd974fef05e4 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -168,14 +168,14 @@ int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe)
val = cc_ioread(drvdata, CC_REG(AXIM_CACHE_PARAMS));
if (is_probe)
- dev_info(dev, "Cache params previous: 0x%08X\n", val);
+ dev_dbg(dev, "Cache params previous: 0x%08X\n", val);
cc_iowrite(drvdata, CC_REG(AXIM_CACHE_PARAMS), cache_params);
val = cc_ioread(drvdata, CC_REG(AXIM_CACHE_PARAMS));
if (is_probe)
- dev_info(dev, "Cache params current: 0x%08X (expect: 0x%08X)\n",
- val, cache_params);
+ dev_dbg(dev, "Cache params current: 0x%08X (expect: 0x%08X)\n",
+ val, cache_params);
return 0;
}
@@ -190,6 +190,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
u64 dma_mask;
const struct cc_hw_data *hw_rev;
const struct of_device_id *dev_id;
+ struct clk *clk;
int rc = 0;
new_drvdata = devm_kzalloc(dev, sizeof(*new_drvdata), GFP_KERNEL);
@@ -207,15 +208,36 @@ static int init_cc_resources(struct platform_device *plat_dev)
if (hw_rev->rev >= CC_HW_REV_712) {
new_drvdata->hash_len_sz = HASH_LEN_SIZE_712;
new_drvdata->axim_mon_offset = CC_REG(AXIM_MON_COMP);
+ new_drvdata->sig_offset = CC_REG(HOST_SIGNATURE_712);
+ new_drvdata->ver_offset = CC_REG(HOST_VERSION_712);
} else {
new_drvdata->hash_len_sz = HASH_LEN_SIZE_630;
new_drvdata->axim_mon_offset = CC_REG(AXIM_MON_COMP8);
+ new_drvdata->sig_offset = CC_REG(HOST_SIGNATURE_630);
+ new_drvdata->ver_offset = CC_REG(HOST_VERSION_630);
}
platform_set_drvdata(plat_dev, new_drvdata);
new_drvdata->plat_dev = plat_dev;
- new_drvdata->clk = of_clk_get(np, 0);
+ clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(clk))
+ switch (PTR_ERR(clk)) {
+ /* Clock is optional so this might be fine */
+ case -ENOENT:
+ break;
+
+ /* Clock not available, let's try again soon */
+ case -EPROBE_DEFER:
+ return -EPROBE_DEFER;
+
+ default:
+ dev_err(dev, "Error getting clock: %ld\n",
+ PTR_ERR(clk));
+ return PTR_ERR(clk);
+ }
+ new_drvdata->clk = clk;
+
new_drvdata->coherent = of_dma_is_coherent(np);
/* Get device resources */
@@ -265,7 +287,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
}
if (rc) {
- dev_err(dev, "Failed in dma_set_mask, mask=%pad\n", &dma_mask);
+ dev_err(dev, "Failed in dma_set_mask, mask=%llx\n", dma_mask);
return rc;
}
@@ -276,7 +298,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
}
/* Verify correct mapping */
- signature_val = cc_ioread(new_drvdata, CC_REG(HOST_SIGNATURE));
+ signature_val = cc_ioread(new_drvdata, new_drvdata->sig_offset);
if (signature_val != hw_rev->sig) {
dev_err(dev, "Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
signature_val, hw_rev->sig);
@@ -287,7 +309,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
/* Display HW versions */
dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X, Driver version %s\n",
- hw_rev->name, cc_ioread(new_drvdata, CC_REG(HOST_VERSION)),
+ hw_rev->name, cc_ioread(new_drvdata, new_drvdata->ver_offset),
DRV_MODULE_VERSION);
rc = init_cc_regs(new_drvdata, true);
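A standalone sketch of the optional-clock handling introduced above; the function name is illustrative. -ENOENT (no clock described in DT) is tolerated because the clock is optional, while -EPROBE_DEFER is propagated so the probe is retried once the clock provider appears.

static int example_get_optional_clk(struct device *dev, struct clk **out)
{
	struct clk *clk = devm_clk_get(dev, NULL);

	if (IS_ERR(clk)) {
		if (PTR_ERR(clk) == -ENOENT) {
			*out = NULL;		/* no clock: fine, it is optional */
			return 0;
		}
		return PTR_ERR(clk);		/* includes -EPROBE_DEFER */
	}

	*out = clk;
	return 0;
}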
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
index 2048fdeb9579..95f82b2d1e70 100644
--- a/drivers/crypto/ccree/cc_driver.h
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -129,6 +129,8 @@ struct cc_drvdata {
enum cc_hw_rev hw_rev;
u32 hash_len_sz;
u32 axim_mon_offset;
+ u32 sig_offset;
+ u32 ver_offset;
};
struct cc_crypto_alg {
diff --git a/drivers/crypto/ccree/cc_host_regs.h b/drivers/crypto/ccree/cc_host_regs.h
index f51001898ca1..616b2e1c41ba 100644
--- a/drivers/crypto/ccree/cc_host_regs.h
+++ b/drivers/crypto/ccree/cc_host_regs.h
@@ -45,7 +45,8 @@
#define CC_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SIZE 0x1UL
#define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SHIFT 0x17UL
#define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SIZE 0x1UL
-#define CC_HOST_SIGNATURE_REG_OFFSET 0xA24UL
+#define CC_HOST_SIGNATURE_712_REG_OFFSET 0xA24UL
+#define CC_HOST_SIGNATURE_630_REG_OFFSET 0xAC8UL
#define CC_HOST_SIGNATURE_VALUE_BIT_SHIFT 0x0UL
#define CC_HOST_SIGNATURE_VALUE_BIT_SIZE 0x20UL
#define CC_HOST_BOOT_REG_OFFSET 0xA28UL
@@ -105,7 +106,8 @@
#define CC_HOST_BOOT_ONLY_ENCRYPT_LOCAL_BIT_SIZE 0x1UL
#define CC_HOST_BOOT_AES_EXISTS_LOCAL_BIT_SHIFT 0x1EUL
#define CC_HOST_BOOT_AES_EXISTS_LOCAL_BIT_SIZE 0x1UL
-#define CC_HOST_VERSION_REG_OFFSET 0xA40UL
+#define CC_HOST_VERSION_712_REG_OFFSET 0xA40UL
+#define CC_HOST_VERSION_630_REG_OFFSET 0xAD8UL
#define CC_HOST_VERSION_VALUE_BIT_SHIFT 0x0UL
#define CC_HOST_VERSION_VALUE_BIT_SIZE 0x20UL
#define CC_HOST_KFDE0_VALID_REG_OFFSET 0xA60UL
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 59fe6631e73e..b916c4eb608c 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -203,13 +203,8 @@ static inline void chcr_handle_aead_resp(struct aead_request *req,
int err)
{
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
- chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
- if (reqctx->b0_dma)
- dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->b0_dma,
- reqctx->b0_len, DMA_BIDIRECTIONAL);
+ chcr_aead_common_exit(req);
if (reqctx->verify == VERIFY_SW) {
chcr_verify_tag(req, input, &err);
reqctx->verify = VERIFY_HW;
@@ -638,7 +633,6 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src,
src = sg_next(src);
srcskip = 0;
}
-
if (sg_dma_len(dst) == dstskip) {
dst = sg_next(dst);
dstskip = 0;
@@ -688,6 +682,7 @@ static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
int err;
SKCIPHER_REQUEST_ON_STACK(subreq, cipher);
+
skcipher_request_set_tfm(subreq, cipher);
skcipher_request_set_callback(subreq, flags, NULL, NULL);
skcipher_request_set_crypt(subreq, src, dst,
@@ -760,13 +755,13 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
reqctx->dst_ofst);
- dst_size = get_space_for_phys_dsgl(nents + 1);
+ dst_size = get_space_for_phys_dsgl(nents);
kctx_len = roundup(ablkctx->enckey_len, 16);
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
CHCR_SRC_SG_SIZE, reqctx->src_ofst);
- temp = reqctx->imm ? roundup(IV + wrparam->req->nbytes, 16) :
- (sgl_len(nents + MIN_CIPHER_SG) * 8);
+ temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
+ (sgl_len(nents) * 8);
transhdr_len += temp;
transhdr_len = roundup(transhdr_len, 16);
skb = alloc_skb(SGE_MAX_WR_LEN, flags);
@@ -788,7 +783,7 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
ablkctx->ciph_mode,
0, 0, IV >> 1);
chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
- 0, 0, dst_size);
+ 0, 1, dst_size);
chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
if ((reqctx->op == CHCR_DECRYPT_OP) &&
@@ -818,8 +813,8 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);
atomic_inc(&adap->chcr_stats.cipher_rqst);
- temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len
- +(reqctx->imm ? (IV + wrparam->bytes) : 0);
+ temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
+ + (reqctx->imm ? (wrparam->bytes) : 0);
create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
transhdr_len, temp,
ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
@@ -1022,7 +1017,7 @@ static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
ret = crypto_cipher_setkey(cipher, key, keylen);
if (ret)
goto out;
- /*H/W sends the encrypted IV in dsgl when AADIVDROP bit is 0*/
+ crypto_cipher_encrypt_one(cipher, iv, iv);
for (i = 0; i < round8; i++)
gf128mul_x8_ble((le128 *)iv, (le128 *)iv);
@@ -1113,16 +1108,8 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
goto complete;
}
- if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- c_ctx(tfm)->tx_qidx))) {
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
- err = -EBUSY;
- goto unmap;
- }
-
- }
if (!reqctx->imm) {
- bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 1,
+ bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
CIP_SPACE_LEFT(ablkctx->enckey_len),
reqctx->src_ofst, reqctx->dst_ofst);
if ((bytes + reqctx->processed) >= req->nbytes)
@@ -1133,11 +1120,7 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
/*CTR mode counter overflow*/
bytes = req->nbytes - reqctx->processed;
}
- dma_sync_single_for_cpu(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
- reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
- dma_sync_single_for_device(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
- reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
if (err)
goto unmap;
@@ -1212,7 +1195,6 @@ static int process_cipher(struct ablkcipher_request *req,
dnents = sg_nents_xlen(req->dst, req->nbytes,
CHCR_DST_SG_SIZE, 0);
- dnents += 1; // IV
phys_dsgl = get_space_for_phys_dsgl(dnents);
kctx_len = roundup(ablkctx->enckey_len, 16);
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
@@ -1225,8 +1207,7 @@ static int process_cipher(struct ablkcipher_request *req,
}
if (!reqctx->imm) {
- bytes = chcr_sg_ent_in_wr(req->src, req->dst,
- MIN_CIPHER_SG,
+ bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
CIP_SPACE_LEFT(ablkctx->enckey_len),
0, 0);
if ((bytes + reqctx->processed) >= req->nbytes)
@@ -1293,13 +1274,14 @@ static int chcr_aes_encrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct sk_buff *skb = NULL;
- int err;
+ int err, isfull = 0;
struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
c_ctx(tfm)->tx_qidx))) {
+ isfull = 1;
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
- return -EBUSY;
+ return -ENOSPC;
}
err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
@@ -1309,7 +1291,7 @@ static int chcr_aes_encrypt(struct ablkcipher_request *req)
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
chcr_send_wr(skb);
- return -EINPROGRESS;
+ return isfull ? -EBUSY : -EINPROGRESS;
}
static int chcr_aes_decrypt(struct ablkcipher_request *req)
@@ -1317,12 +1299,13 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req)
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
struct sk_buff *skb = NULL;
- int err;
+ int err, isfull = 0;
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
c_ctx(tfm)->tx_qidx))) {
+ isfull = 1;
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
- return -EBUSY;
+ return -ENOSPC;
}
err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
@@ -1332,7 +1315,7 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req)
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
chcr_send_wr(skb);
- return -EINPROGRESS;
+ return isfull ? -EBUSY : -EINPROGRESS;
}
static int chcr_device_init(struct chcr_context *ctx)
@@ -1574,14 +1557,15 @@ static int chcr_ahash_update(struct ahash_request *req)
u8 remainder = 0, bs;
unsigned int nbytes = req->nbytes;
struct hash_wr_param params;
- int error;
+ int error, isfull = 0;
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
u_ctx = ULD_CTX(h_ctx(rtfm));
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
h_ctx(rtfm)->tx_qidx))) {
+ isfull = 1;
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
- return -EBUSY;
+ return -ENOSPC;
}
if (nbytes + req_ctx->reqlen >= bs) {
@@ -1633,7 +1617,7 @@ static int chcr_ahash_update(struct ahash_request *req)
set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
chcr_send_wr(skb);
- return -EINPROGRESS;
+ return isfull ? -EBUSY : -EINPROGRESS;
unmap:
chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
return error;
@@ -1710,15 +1694,16 @@ static int chcr_ahash_finup(struct ahash_request *req)
struct sk_buff *skb;
struct hash_wr_param params;
u8 bs;
- int error;
+ int error, isfull = 0;
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
u_ctx = ULD_CTX(h_ctx(rtfm));
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
h_ctx(rtfm)->tx_qidx))) {
+ isfull = 1;
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
- return -EBUSY;
+ return -ENOSPC;
}
chcr_init_hctx_per_wr(req_ctx);
error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
@@ -1777,7 +1762,7 @@ static int chcr_ahash_finup(struct ahash_request *req)
set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
chcr_send_wr(skb);
- return -EINPROGRESS;
+ return isfull ? -EBUSY : -EINPROGRESS;
unmap:
chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
return error;
@@ -1791,7 +1776,7 @@ static int chcr_ahash_digest(struct ahash_request *req)
struct sk_buff *skb;
struct hash_wr_param params;
u8 bs;
- int error;
+ int error, isfull = 0;
rtfm->init(req);
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
@@ -1799,8 +1784,9 @@ static int chcr_ahash_digest(struct ahash_request *req)
u_ctx = ULD_CTX(h_ctx(rtfm));
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
h_ctx(rtfm)->tx_qidx))) {
+ isfull = 1;
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
- return -EBUSY;
+ return -ENOSPC;
}
chcr_init_hctx_per_wr(req_ctx);
@@ -1856,7 +1842,7 @@ static int chcr_ahash_digest(struct ahash_request *req)
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
chcr_send_wr(skb);
- return -EINPROGRESS;
+ return isfull ? -EBUSY : -EINPROGRESS;
unmap:
chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
return error;
@@ -1875,11 +1861,6 @@ static int chcr_ahash_continue(struct ahash_request *req)
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
u_ctx = ULD_CTX(h_ctx(rtfm));
- if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- h_ctx(rtfm)->tx_qidx))) {
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
- return -EBUSY;
- }
get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
params.kctx_len = roundup(params.alg_prm.result_size, 16);
if (is_hmac(crypto_ahash_tfm(rtfm))) {
@@ -2192,22 +2173,35 @@ static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
}
}
-static int chcr_aead_common_init(struct aead_request *req,
- unsigned short op_type)
+inline void chcr_aead_common_exit(struct aead_request *req)
+{
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
+
+ chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
+}
+
+static int chcr_aead_common_init(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
- int error = -EINVAL;
unsigned int authsize = crypto_aead_authsize(tfm);
+ int error = -EINVAL;
/* validate key size */
if (aeadctx->enckey_len == 0)
goto err;
- if (op_type && req->cryptlen < authsize)
+ if (reqctx->op && req->cryptlen < authsize)
goto err;
+ if (reqctx->b0_len)
+ reqctx->scratch_pad = reqctx->iv + IV;
+ else
+ reqctx->scratch_pad = NULL;
+
error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
- op_type);
+ reqctx->op);
if (error) {
error = -ENOMEM;
goto err;
@@ -2244,7 +2238,7 @@ static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
aead_request_set_tfm(subreq, aeadctx->sw_cipher);
aead_request_set_callback(subreq, req->base.flags,
req->base.complete, req->base.data);
- aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+ aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
req->iv);
aead_request_set_ad(subreq, req->assoclen);
return op_type ? crypto_aead_decrypt(subreq) :
@@ -2253,8 +2247,7 @@ static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
static struct sk_buff *create_authenc_wr(struct aead_request *req,
unsigned short qid,
- int size,
- unsigned short op_type)
+ int size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
@@ -2278,18 +2271,20 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
if (req->cryptlen == 0)
return NULL;
- reqctx->b0_dma = 0;
+ reqctx->b0_len = 0;
+ error = chcr_aead_common_init(req);
+ if (error)
+ return ERR_PTR(error);
+
if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
- subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
null = 1;
assoclen = 0;
+ reqctx->aad_nents = 0;
}
- error = chcr_aead_common_init(req, op_type);
- if (error)
- return ERR_PTR(error);
dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
dnents += sg_nents_xlen(req->dst, req->cryptlen +
- (op_type ? -authsize : authsize), CHCR_DST_SG_SIZE,
+ (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE,
req->assoclen);
dnents += MIN_AUTH_SG; // For IV
@@ -2306,11 +2301,10 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
transhdr_len = roundup(transhdr_len, 16);
if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
- transhdr_len, op_type)) {
+ transhdr_len, reqctx->op)) {
atomic_inc(&adap->chcr_stats.fallback);
- chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
- op_type);
- return ERR_PTR(chcr_aead_fallback(req, op_type));
+ chcr_aead_common_exit(req);
+ return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
}
skb = alloc_skb(SGE_MAX_WR_LEN, flags);
if (!skb) {
@@ -2320,7 +2314,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
chcr_req = __skb_put_zero(skb, transhdr_len);
- temp = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
+ temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
/*
* Input order is AAD,IV and Payload. where IV should be included as
@@ -2344,8 +2338,8 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
else
temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
- chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
- (op_type == CHCR_ENCRYPT_OP) ? 1 : 0,
+ chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
+ (reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
temp,
actx->auth_mode, aeadctx->hmac_ctrl,
IV >> 1);
@@ -2353,7 +2347,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
0, 0, dst_size);
chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
- if (op_type == CHCR_ENCRYPT_OP ||
+ if (reqctx->op == CHCR_ENCRYPT_OP ||
subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
memcpy(chcr_req->key_ctx.key, aeadctx->key,
@@ -2376,20 +2370,18 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
}
phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
- chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
- chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);
+ chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
+ chcr_add_aead_src_ent(req, ulptx, assoclen);
atomic_inc(&adap->chcr_stats.cipher_rqst);
temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
transhdr_len, temp, 0);
reqctx->skb = skb;
- reqctx->op = op_type;
return skb;
err:
- chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
- op_type);
+ chcr_aead_common_exit(req);
return ERR_PTR(error);
}
@@ -2408,11 +2400,14 @@ int chcr_aead_dma_map(struct device *dev,
-authsize : authsize);
if (!req->cryptlen || !dst_size)
return 0;
- reqctx->iv_dma = dma_map_single(dev, reqctx->iv, IV,
+ reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, reqctx->iv_dma))
return -ENOMEM;
-
+ if (reqctx->b0_len)
+ reqctx->b0_dma = reqctx->iv_dma + IV;
+ else
+ reqctx->b0_dma = 0;
if (req->src == req->dst) {
error = dma_map_sg(dev, req->src, sg_nents(req->src),
DMA_BIDIRECTIONAL);
@@ -2452,7 +2447,7 @@ void chcr_aead_dma_unmap(struct device *dev,
if (!req->cryptlen || !dst_size)
return;
- dma_unmap_single(dev, reqctx->iv_dma, IV,
+ dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
DMA_BIDIRECTIONAL);
if (req->src == req->dst) {
dma_unmap_sg(dev, req->src, sg_nents(req->src),
@@ -2467,8 +2462,7 @@ void chcr_aead_dma_unmap(struct device *dev,
void chcr_add_aead_src_ent(struct aead_request *req,
struct ulptx_sgl *ulptx,
- unsigned int assoclen,
- unsigned short op_type)
+ unsigned int assoclen)
{
struct ulptx_walk ulp_walk;
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
@@ -2476,7 +2470,7 @@ void chcr_add_aead_src_ent(struct aead_request *req,
if (reqctx->imm) {
u8 *buf = (u8 *)ulptx;
- if (reqctx->b0_dma) {
+ if (reqctx->b0_len) {
memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
buf += reqctx->b0_len;
}
@@ -2489,7 +2483,7 @@ void chcr_add_aead_src_ent(struct aead_request *req,
buf, req->cryptlen, req->assoclen);
} else {
ulptx_walk_init(&ulp_walk, ulptx);
- if (reqctx->b0_dma)
+ if (reqctx->b0_len)
ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
&reqctx->b0_dma);
ulptx_walk_add_sg(&ulp_walk, req->src, assoclen, 0);
@@ -2503,7 +2497,6 @@ void chcr_add_aead_src_ent(struct aead_request *req,
void chcr_add_aead_dst_ent(struct aead_request *req,
struct cpl_rx_phys_dsgl *phys_cpl,
unsigned int assoclen,
- unsigned short op_type,
unsigned short qid)
{
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
@@ -2513,32 +2506,30 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
u32 temp;
dsgl_walk_init(&dsgl_walk, phys_cpl);
- if (reqctx->b0_dma)
+ if (reqctx->b0_len)
dsgl_walk_add_page(&dsgl_walk, reqctx->b0_len, &reqctx->b0_dma);
dsgl_walk_add_sg(&dsgl_walk, req->dst, assoclen, 0);
dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
- temp = req->cryptlen + (op_type ? -authsize : authsize);
+ temp = req->cryptlen + (reqctx->op ? -authsize : authsize);
dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
dsgl_walk_end(&dsgl_walk, qid);
}
void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
- struct ulptx_sgl *ulptx,
+ void *ulptx,
struct cipher_wr_param *wrparam)
{
struct ulptx_walk ulp_walk;
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+ u8 *buf = ulptx;
+ memcpy(buf, reqctx->iv, IV);
+ buf += IV;
if (reqctx->imm) {
- u8 *buf = (u8 *)ulptx;
-
- memcpy(buf, reqctx->iv, IV);
- buf += IV;
sg_pcopy_to_buffer(req->src, sg_nents(req->src),
buf, wrparam->bytes, reqctx->processed);
} else {
- ulptx_walk_init(&ulp_walk, ulptx);
- ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma);
+ ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
reqctx->src_ofst);
reqctx->srcsg = ulp_walk.last_sg;
@@ -2556,7 +2547,6 @@ void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
struct dsgl_walk dsgl_walk;
dsgl_walk_init(&dsgl_walk, phys_cpl);
- dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
reqctx->dst_ofst);
reqctx->dstsg = dsgl_walk.last_sg;
@@ -2630,12 +2620,6 @@ int chcr_cipher_dma_map(struct device *dev,
struct ablkcipher_request *req)
{
int error;
- struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
-
- reqctx->iv_dma = dma_map_single(dev, reqctx->iv, IV,
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dev, reqctx->iv_dma))
- return -ENOMEM;
if (req->src == req->dst) {
error = dma_map_sg(dev, req->src, sg_nents(req->src),
@@ -2658,17 +2642,12 @@ int chcr_cipher_dma_map(struct device *dev,
return 0;
err:
- dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
return -ENOMEM;
}
void chcr_cipher_dma_unmap(struct device *dev,
struct ablkcipher_request *req)
{
- struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
-
- dma_unmap_single(dev, reqctx->iv_dma, IV,
- DMA_BIDIRECTIONAL);
if (req->src == req->dst) {
dma_unmap_sg(dev, req->src, sg_nents(req->src),
DMA_BIDIRECTIONAL);
@@ -2738,7 +2717,8 @@ static inline int crypto_ccm_check_iv(const u8 *iv)
static int ccm_format_packet(struct aead_request *req,
struct chcr_aead_ctx *aeadctx,
unsigned int sub_type,
- unsigned short op_type)
+ unsigned short op_type,
+ unsigned int assoclen)
{
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
int rc = 0;
@@ -2748,13 +2728,13 @@ static int ccm_format_packet(struct aead_request *req,
memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
memcpy(reqctx->iv + 4, req->iv, 8);
memset(reqctx->iv + 12, 0, 4);
- *((unsigned short *)(reqctx->scratch_pad + 16)) =
- htons(req->assoclen - 8);
} else {
memcpy(reqctx->iv, req->iv, 16);
- *((unsigned short *)(reqctx->scratch_pad + 16)) =
- htons(req->assoclen);
}
+ if (assoclen)
+ *((unsigned short *)(reqctx->scratch_pad + 16)) =
+ htons(assoclen);
+
generate_b0(req, aeadctx, op_type);
/* zero the ctr value */
memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
@@ -2836,8 +2816,7 @@ static int aead_ccm_validate_input(unsigned short op_type,
static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
unsigned short qid,
- int size,
- unsigned short op_type)
+ int size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
@@ -2855,22 +2834,20 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
GFP_ATOMIC;
struct adapter *adap = padap(a_ctx(tfm)->dev);
- reqctx->b0_dma = 0;
sub_type = get_aead_subtype(tfm);
if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
assoclen -= 8;
- error = chcr_aead_common_init(req, op_type);
+ reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
+ error = chcr_aead_common_init(req);
if (error)
return ERR_PTR(error);
-
- reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
- error = aead_ccm_validate_input(op_type, req, aeadctx, sub_type);
+ error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
if (error)
goto err;
dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
dnents += sg_nents_xlen(req->dst, req->cryptlen
- + (op_type ? -authsize : authsize),
+ + (reqctx->op ? -authsize : authsize),
CHCR_DST_SG_SIZE, req->assoclen);
dnents += MIN_CCM_SG; // For IV and B0
dst_size = get_space_for_phys_dsgl(dnents);
@@ -2886,11 +2863,10 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
transhdr_len = roundup(transhdr_len, 16);
if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
- reqctx->b0_len, transhdr_len, op_type)) {
+ reqctx->b0_len, transhdr_len, reqctx->op)) {
atomic_inc(&adap->chcr_stats.fallback);
- chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
- op_type);
- return ERR_PTR(chcr_aead_fallback(req, op_type));
+ chcr_aead_common_exit(req);
+ return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
}
skb = alloc_skb(SGE_MAX_WR_LEN, flags);
@@ -2901,7 +2877,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
chcr_req = (struct chcr_wr *) __skb_put_zero(skb, transhdr_len);
- fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type);
+ fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
@@ -2910,21 +2886,11 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
- error = ccm_format_packet(req, aeadctx, sub_type, op_type);
+ error = ccm_format_packet(req, aeadctx, sub_type, reqctx->op, assoclen);
if (error)
goto dstmap_fail;
-
- reqctx->b0_dma = dma_map_single(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev,
- &reqctx->scratch_pad, reqctx->b0_len,
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev,
- reqctx->b0_dma)) {
- error = -ENOMEM;
- goto dstmap_fail;
- }
-
- chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
- chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);
+ chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
+ chcr_add_aead_src_ent(req, ulptx, assoclen);
atomic_inc(&adap->chcr_stats.aead_rqst);
temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
@@ -2933,20 +2899,18 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
transhdr_len, temp, 0);
reqctx->skb = skb;
- reqctx->op = op_type;
return skb;
dstmap_fail:
kfree_skb(skb);
err:
- chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, op_type);
+ chcr_aead_common_exit(req);
return ERR_PTR(error);
}
static struct sk_buff *create_gcm_wr(struct aead_request *req,
unsigned short qid,
- int size,
- unsigned short op_type)
+ int size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
@@ -2966,13 +2930,13 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
assoclen = req->assoclen - 8;
- reqctx->b0_dma = 0;
- error = chcr_aead_common_init(req, op_type);
+ reqctx->b0_len = 0;
+ error = chcr_aead_common_init(req);
if (error)
return ERR_PTR(error);
dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
dnents += sg_nents_xlen(req->dst, req->cryptlen +
- (op_type ? -authsize : authsize),
+ (reqctx->op ? -authsize : authsize),
CHCR_DST_SG_SIZE, req->assoclen);
dnents += MIN_GCM_SG; // For IV
dst_size = get_space_for_phys_dsgl(dnents);
@@ -2986,11 +2950,11 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
transhdr_len += temp;
transhdr_len = roundup(transhdr_len, 16);
if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
- transhdr_len, op_type)) {
+ transhdr_len, reqctx->op)) {
+
atomic_inc(&adap->chcr_stats.fallback);
- chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
- op_type);
- return ERR_PTR(chcr_aead_fallback(req, op_type));
+ chcr_aead_common_exit(req);
+ return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
}
skb = alloc_skb(SGE_MAX_WR_LEN, flags);
if (!skb) {
@@ -3001,7 +2965,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
chcr_req = __skb_put_zero(skb, transhdr_len);
//Offset of tag from end
- temp = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
+ temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
a_ctx(tfm)->dev->rx_channel_id, 2,
(assoclen + 1));
@@ -3014,7 +2978,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1,
temp, temp);
chcr_req->sec_cpl.seqno_numivs =
- FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type ==
+ FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
CHCR_ENCRYPT_OP) ? 1 : 0,
CHCR_SCMD_CIPHER_MODE_AES_GCM,
CHCR_SCMD_AUTH_MODE_GHASH,
@@ -3040,19 +3004,18 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
- chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
- chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);
+ chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
+ chcr_add_aead_src_ent(req, ulptx, assoclen);
atomic_inc(&adap->chcr_stats.aead_rqst);
temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
transhdr_len, temp, reqctx->verify);
reqctx->skb = skb;
- reqctx->op = op_type;
return skb;
err:
- chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, op_type);
+ chcr_aead_common_exit(req);
return ERR_PTR(error);
}
@@ -3461,6 +3424,7 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
}
{
SHASH_DESC_ON_STACK(shash, base_hash);
+
shash->tfm = base_hash;
shash->flags = crypto_shash_get_flags(base_hash);
bs = crypto_shash_blocksize(base_hash);
@@ -3585,13 +3549,13 @@ out:
}
static int chcr_aead_op(struct aead_request *req,
- unsigned short op_type,
int size,
create_wr_t create_wr_fn)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct uld_ctx *u_ctx;
struct sk_buff *skb;
+ int isfull = 0;
if (!a_ctx(tfm)->dev) {
pr_err("chcr : %s : No crypto device.\n", __func__);
@@ -3600,13 +3564,13 @@ static int chcr_aead_op(struct aead_request *req,
u_ctx = ULD_CTX(a_ctx(tfm));
if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
a_ctx(tfm)->tx_qidx)) {
+ isfull = 1;
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
- return -EBUSY;
+ return -ENOSPC;
}
/* Form a WR from req */
- skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size,
- op_type);
+ skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size);
if (IS_ERR(skb) || !skb)
return PTR_ERR(skb);
@@ -3614,7 +3578,7 @@ static int chcr_aead_op(struct aead_request *req,
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
chcr_send_wr(skb);
- return -EINPROGRESS;
+ return isfull ? -EBUSY : -EINPROGRESS;
}
static int chcr_aead_encrypt(struct aead_request *req)
@@ -3623,21 +3587,19 @@ static int chcr_aead_encrypt(struct aead_request *req)
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
reqctx->verify = VERIFY_HW;
+ reqctx->op = CHCR_ENCRYPT_OP;
switch (get_aead_subtype(tfm)) {
case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
- return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
- create_authenc_wr);
+ return chcr_aead_op(req, 0, create_authenc_wr);
case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
- return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
- create_aead_ccm_wr);
+ return chcr_aead_op(req, 0, create_aead_ccm_wr);
default:
- return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
- create_gcm_wr);
+ return chcr_aead_op(req, 0, create_gcm_wr);
}
}
@@ -3655,21 +3617,18 @@ static int chcr_aead_decrypt(struct aead_request *req)
size = 0;
reqctx->verify = VERIFY_HW;
}
-
+ reqctx->op = CHCR_DECRYPT_OP;
switch (get_aead_subtype(tfm)) {
case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
- return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
- create_authenc_wr);
+ return chcr_aead_op(req, size, create_authenc_wr);
case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
- return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
- create_aead_ccm_wr);
+ return chcr_aead_op(req, size, create_aead_ccm_wr);
default:
- return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
- create_gcm_wr);
+ return chcr_aead_op(req, size, create_gcm_wr);
}
}
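A hedged caller-side sketch of the return-code convention the chcr paths above now follow: -ENOSPC means the queue was full and the request was dropped (no MAY_BACKLOG), while -EBUSY means the request was accepted but the caller should throttle until completions drain. The -EAGAIN mapping below is a hypothetical caller policy, not part of the driver.

static int example_submit(struct skcipher_request *req)
{
	int ret = crypto_skcipher_encrypt(req);

	switch (ret) {
	case 0:
	case -EINPROGRESS:
		return 0;		/* submitted; completion callback will run */
	case -EBUSY:
		return 0;		/* submitted, but queue is full: back off */
	case -ENOSPC:
		return -EAGAIN;		/* nothing was queued: retry later (caller policy) */
	default:
		return ret;		/* hard error */
	}
}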
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h
index dba3dff1e209..1871500309e2 100644
--- a/drivers/crypto/chelsio/chcr_algo.h
+++ b/drivers/crypto/chelsio/chcr_algo.h
@@ -146,7 +146,7 @@
kctx_len)
#define CIPHER_TRANSHDR_SIZE(kctx_len, sge_pairs) \
(TRANSHDR_SIZE((kctx_len)) + (sge_pairs) +\
- sizeof(struct cpl_rx_phys_dsgl))
+ sizeof(struct cpl_rx_phys_dsgl) + AES_BLOCK_SIZE)
#define HASH_TRANSHDR_SIZE(kctx_len)\
(TRANSHDR_SIZE(kctx_len) + DUMMY_BYTES)
@@ -259,7 +259,6 @@
ULP_TX_SC_MORE_V((immdatalen)))
#define MAX_NK 8
#define MAX_DSGL_ENT 32
-#define MIN_CIPHER_SG 1 /* IV */
#define MIN_AUTH_SG 1 /* IV */
#define MIN_GCM_SG 1 /* IV */
#define MIN_DIGEST_SG 1 /*Partial Buffer*/
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
index 1a20424e18c6..de3a9c085daf 100644
--- a/drivers/crypto/chelsio/chcr_core.h
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -56,7 +56,7 @@
#define MAX_SALT 4
#define CIP_WR_MIN_LEN (sizeof(struct chcr_wr) + \
sizeof(struct cpl_rx_phys_dsgl) + \
- sizeof(struct ulptx_sgl))
+ sizeof(struct ulptx_sgl) + 16) //IV
#define HASH_WR_MIN_LEN (sizeof(struct chcr_wr) + \
DUMMY_BYTES + \
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index c8e8972af283..54835cb109e5 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -190,8 +190,8 @@ struct chcr_aead_reqctx {
short int dst_nents;
u16 imm;
u16 verify;
- u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
- unsigned char scratch_pad[MAX_SCRATCH_PAD_SIZE];
+ u8 iv[CHCR_MAX_CRYPTO_IV_LEN + MAX_SCRATCH_PAD_SIZE];
+ u8 *scratch_pad;
};
struct ulptx_walk {
@@ -295,7 +295,6 @@ struct chcr_blkcipher_req_ctx {
unsigned int src_ofst;
unsigned int dst_ofst;
unsigned int op;
- dma_addr_t iv_dma;
u16 imm;
u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
};
@@ -312,8 +311,7 @@ struct chcr_alg_template {
typedef struct sk_buff *(*create_wr_t)(struct aead_request *req,
unsigned short qid,
- int size,
- unsigned short op_type);
+ int size);
void chcr_verify_tag(struct aead_request *req, u8 *input, int *err);
int chcr_aead_dma_map(struct device *dev, struct aead_request *req,
@@ -322,12 +320,12 @@ void chcr_aead_dma_unmap(struct device *dev, struct aead_request *req,
unsigned short op_type);
void chcr_add_aead_dst_ent(struct aead_request *req,
struct cpl_rx_phys_dsgl *phys_cpl,
- unsigned int assoclen, unsigned short op_type,
+ unsigned int assoclen,
unsigned short qid);
void chcr_add_aead_src_ent(struct aead_request *req, struct ulptx_sgl *ulptx,
- unsigned int assoclen, unsigned short op_type);
+ unsigned int assoclen);
void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
- struct ulptx_sgl *ulptx,
+ void *ulptx,
struct cipher_wr_param *wrparam);
int chcr_cipher_dma_map(struct device *dev, struct ablkcipher_request *req);
void chcr_cipher_dma_unmap(struct device *dev, struct ablkcipher_request *req);
@@ -340,4 +338,5 @@ void chcr_add_hash_src_ent(struct ahash_request *req, struct ulptx_sgl *ulptx,
struct hash_wr_param *param);
int chcr_hash_dma_map(struct device *dev, struct ahash_request *req);
void chcr_hash_dma_unmap(struct device *dev, struct ahash_request *req);
+void chcr_aead_common_exit(struct aead_request *req);
#endif /* __CHCR_CRYPTO_H__ */
diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c
index 8e0aa3f175c9..461b97e2f1fd 100644
--- a/drivers/crypto/chelsio/chcr_ipsec.c
+++ b/drivers/crypto/chelsio/chcr_ipsec.c
@@ -346,18 +346,23 @@ inline void *copy_cpltx_pktxt(struct sk_buff *skb,
struct net_device *dev,
void *pos)
{
+ struct cpl_tx_pkt_core *cpl;
+ struct sge_eth_txq *q;
struct adapter *adap;
struct port_info *pi;
- struct sge_eth_txq *q;
- struct cpl_tx_pkt_core *cpl;
- u64 cntrl = 0;
u32 ctrl0, qidx;
+ u64 cntrl = 0;
+ int left;
pi = netdev_priv(dev);
adap = pi->adapter;
qidx = skb->queue_mapping;
q = &adap->sge.ethtxq[qidx + pi->first_qset];
+ left = (void *)q->q.stat - pos;
+ if (!left)
+ pos = q->q.desc;
+
cpl = (struct cpl_tx_pkt_core *)pos;
cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
@@ -382,18 +387,17 @@ inline void *copy_key_cpltx_pktxt(struct sk_buff *skb,
void *pos,
struct ipsec_sa_entry *sa_entry)
{
- struct adapter *adap;
- struct port_info *pi;
- struct sge_eth_txq *q;
- unsigned int len, qidx;
struct _key_ctx *key_ctx;
int left, eoq, key_len;
+ struct sge_eth_txq *q;
+ struct adapter *adap;
+ struct port_info *pi;
+ unsigned int qidx;
pi = netdev_priv(dev);
adap = pi->adapter;
qidx = skb->queue_mapping;
q = &adap->sge.ethtxq[qidx + pi->first_qset];
- len = sa_entry->enckey_len + sizeof(struct cpl_tx_pkt_core);
key_len = sa_entry->kctx_len;
/* end of queue, reset pos to start of queue */
@@ -411,19 +415,14 @@ inline void *copy_key_cpltx_pktxt(struct sk_buff *skb,
pos += sizeof(struct _key_ctx);
left -= sizeof(struct _key_ctx);
- if (likely(len <= left)) {
+ if (likely(key_len <= left)) {
memcpy(key_ctx->key, sa_entry->key, key_len);
pos += key_len;
} else {
- if (key_len <= left) {
- memcpy(pos, sa_entry->key, key_len);
- pos += key_len;
- } else {
- memcpy(pos, sa_entry->key, left);
- memcpy(q->q.desc, sa_entry->key + left,
- key_len - left);
- pos = (u8 *)q->q.desc + (key_len - left);
- }
+ memcpy(pos, sa_entry->key, left);
+ memcpy(q->q.desc, sa_entry->key + left,
+ key_len - left);
+ pos = (u8 *)q->q.desc + (key_len - left);
}
/* Copy CPL TX PKT XT */
pos = copy_cpltx_pktxt(skb, dev, pos);
diff --git a/drivers/crypto/chelsio/chtls/chtls.h b/drivers/crypto/chelsio/chtls/chtls.h
index f4b8f1ec0061..a53a0e6ba024 100644
--- a/drivers/crypto/chelsio/chtls/chtls.h
+++ b/drivers/crypto/chelsio/chtls/chtls.h
@@ -67,11 +67,6 @@ enum {
CPL_RET_UNKNOWN_TID = 4 /* unexpected unknown TID */
};
-#define TLS_RCV_ST_READ_HEADER 0xF0
-#define TLS_RCV_ST_READ_BODY 0xF1
-#define TLS_RCV_ST_READ_DONE 0xF2
-#define TLS_RCV_ST_READ_NB 0xF3
-
#define LISTEN_INFO_HASH_SIZE 32
#define RSPQ_HASH_BITS 5
struct listen_info {
@@ -149,6 +144,7 @@ struct chtls_dev {
struct list_head rcu_node;
struct list_head na_node;
unsigned int send_page_order;
+ int max_host_sndbuf;
struct key_map kmap;
};
@@ -278,6 +274,7 @@ struct tlsrx_cmp_hdr {
#define TLSRX_HDR_PKT_MAC_ERROR_F TLSRX_HDR_PKT_MAC_ERROR_V(1U)
#define TLSRX_HDR_PKT_ERROR_M 0x1F
+#define CONTENT_TYPE_ERROR 0x7F
struct ulp_mem_rw {
__be32 cmd;
@@ -347,8 +344,8 @@ enum {
ULPCB_FLAG_HOLD = 1 << 3, /* skb not ready for Tx yet */
ULPCB_FLAG_COMPL = 1 << 4, /* request WR completion */
ULPCB_FLAG_URG = 1 << 5, /* urgent data */
- ULPCB_FLAG_TLS_ND = 1 << 6, /* payload of zero length */
- ULPCB_FLAG_NO_HDR = 1 << 7, /* not a ofld wr */
+ ULPCB_FLAG_TLS_HDR = 1 << 6, /* payload with tls hdr */
+ ULPCB_FLAG_NO_HDR = 1 << 7, /* not a ofld wr */
};
/* The ULP mode/submode of an skbuff */
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
index 82a473a0cefa..2bb6f0380758 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
@@ -1537,6 +1537,10 @@ static int chtls_rx_data(struct chtls_dev *cdev, struct sk_buff *skb)
struct sock *sk;
sk = lookup_tid(cdev->tids, hwtid);
+ if (unlikely(!sk)) {
+ pr_err("can't find conn. for hwtid %u.\n", hwtid);
+ return -EINVAL;
+ }
skb_dst_set(skb, NULL);
process_cpl_msg(chtls_recv_data, sk, skb);
return 0;
@@ -1585,6 +1589,10 @@ static int chtls_rx_pdu(struct chtls_dev *cdev, struct sk_buff *skb)
struct sock *sk;
sk = lookup_tid(cdev->tids, hwtid);
+ if (unlikely(!sk)) {
+ pr_err("can't find conn. for hwtid %u.\n", hwtid);
+ return -EINVAL;
+ }
skb_dst_set(skb, NULL);
process_cpl_msg(chtls_recv_pdu, sk, skb);
return 0;
@@ -1600,12 +1608,14 @@ static void chtls_set_hdrlen(struct sk_buff *skb, unsigned int nlen)
static void chtls_rx_hdr(struct sock *sk, struct sk_buff *skb)
{
- struct cpl_rx_tls_cmp *cmp_cpl = cplhdr(skb);
+ struct tlsrx_cmp_hdr *tls_hdr_pkt;
+ struct cpl_rx_tls_cmp *cmp_cpl;
struct sk_buff *skb_rec;
struct chtls_sock *csk;
struct chtls_hws *tlsk;
struct tcp_sock *tp;
+ cmp_cpl = cplhdr(skb);
csk = rcu_dereference_sk_user_data(sk);
tlsk = &csk->tlshws;
tp = tcp_sk(sk);
@@ -1615,16 +1625,18 @@ static void chtls_rx_hdr(struct sock *sk, struct sk_buff *skb)
skb_reset_transport_header(skb);
__skb_pull(skb, sizeof(*cmp_cpl));
+ tls_hdr_pkt = (struct tlsrx_cmp_hdr *)skb->data;
+ if (tls_hdr_pkt->res_to_mac_error & TLSRX_HDR_PKT_ERROR_M)
+ tls_hdr_pkt->type = CONTENT_TYPE_ERROR;
if (!skb->data_len)
- __skb_trim(skb, CPL_RX_TLS_CMP_LENGTH_G
- (ntohl(cmp_cpl->pdulength_length)));
+ __skb_trim(skb, TLS_HEADER_LENGTH);
tp->rcv_nxt +=
CPL_RX_TLS_CMP_PDULENGTH_G(ntohl(cmp_cpl->pdulength_length));
+ ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_TLS_HDR;
skb_rec = __skb_dequeue(&tlsk->sk_recv_queue);
if (!skb_rec) {
- ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_TLS_ND;
__skb_queue_tail(&sk->sk_receive_queue, skb);
} else {
chtls_set_hdrlen(skb, tlsk->pldlen);
@@ -1646,6 +1658,10 @@ static int chtls_rx_cmp(struct chtls_dev *cdev, struct sk_buff *skb)
struct sock *sk;
sk = lookup_tid(cdev->tids, hwtid);
+ if (unlikely(!sk)) {
+ pr_err("can't find conn. for hwtid %u.\n", hwtid);
+ return -EINVAL;
+ }
skb_dst_set(skb, NULL);
process_cpl_msg(chtls_rx_hdr, sk, skb);
@@ -2105,6 +2121,10 @@ static int chtls_wr_ack(struct chtls_dev *cdev, struct sk_buff *skb)
struct sock *sk;
sk = lookup_tid(cdev->tids, hwtid);
+ if (unlikely(!sk)) {
+ pr_err("can't find conn. for hwtid %u.\n", hwtid);
+ return -EINVAL;
+ }
process_cpl_msg(chtls_rx_ack, sk, skb);
return 0;
diff --git a/drivers/crypto/chelsio/chtls/chtls_hw.c b/drivers/crypto/chelsio/chtls/chtls_hw.c
index 54a13aa99121..55d50140f9e5 100644
--- a/drivers/crypto/chelsio/chtls/chtls_hw.c
+++ b/drivers/crypto/chelsio/chtls/chtls_hw.c
@@ -213,7 +213,7 @@ static int chtls_key_info(struct chtls_sock *csk,
struct _key_ctx *kctx,
u32 keylen, u32 optname)
{
- unsigned char key[CHCR_KEYCTX_CIPHER_KEY_SIZE_256];
+ unsigned char key[AES_KEYSIZE_128];
struct tls12_crypto_info_aes_gcm_128 *gcm_ctx;
unsigned char ghash_h[AEAD_H_SIZE];
struct crypto_cipher *cipher;
@@ -228,10 +228,6 @@ static int chtls_key_info(struct chtls_sock *csk,
if (keylen == AES_KEYSIZE_128) {
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
- } else if (keylen == AES_KEYSIZE_192) {
- ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
- } else if (keylen == AES_KEYSIZE_256) {
- ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
} else {
pr_err("GCM: Invalid key length %d\n", keylen);
return -EINVAL;
diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
index 5a75be43950f..51fc6821cbbf 100644
--- a/drivers/crypto/chelsio/chtls/chtls_io.c
+++ b/drivers/crypto/chelsio/chtls/chtls_io.c
@@ -907,11 +907,83 @@ static int chtls_skb_copy_to_page_nocache(struct sock *sk,
}
/* Read TLS header to find content type and data length */
-static u16 tls_header_read(struct tls_hdr *thdr, struct iov_iter *from)
+static int tls_header_read(struct tls_hdr *thdr, struct iov_iter *from)
{
if (copy_from_iter(thdr, sizeof(*thdr), from) != sizeof(*thdr))
return -EFAULT;
- return (__force u16)cpu_to_be16(thdr->length);
+ return (__force int)cpu_to_be16(thdr->length);
+}
+
+static int csk_mem_free(struct chtls_dev *cdev, struct sock *sk)
+{
+ return (cdev->max_host_sndbuf - sk->sk_wmem_queued);
+}
+
+static int csk_wait_memory(struct chtls_dev *cdev,
+ struct sock *sk, long *timeo_p)
+{
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ int sndbuf, err = 0;
+ long current_timeo;
+ long vm_wait = 0;
+ bool noblock;
+
+ current_timeo = *timeo_p;
+ noblock = (*timeo_p ? false : true);
+ sndbuf = cdev->max_host_sndbuf;
+ if (csk_mem_free(cdev, sk)) {
+ current_timeo = (prandom_u32() % (HZ / 5)) + 2;
+ vm_wait = (prandom_u32() % (HZ / 5)) + 2;
+ }
+
+ add_wait_queue(sk_sleep(sk), &wait);
+ while (1) {
+ sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
+
+ if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
+ goto do_error;
+ if (!*timeo_p) {
+ if (noblock)
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ goto do_nonblock;
+ }
+ if (signal_pending(current))
+ goto do_interrupted;
+ sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
+ if (csk_mem_free(cdev, sk) && !vm_wait)
+ break;
+
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ sk->sk_write_pending++;
+ sk_wait_event(sk, &current_timeo, sk->sk_err ||
+ (sk->sk_shutdown & SEND_SHUTDOWN) ||
+ (csk_mem_free(cdev, sk) && !vm_wait), &wait);
+ sk->sk_write_pending--;
+
+ if (vm_wait) {
+ vm_wait -= current_timeo;
+ current_timeo = *timeo_p;
+ if (current_timeo != MAX_SCHEDULE_TIMEOUT) {
+ current_timeo -= vm_wait;
+ if (current_timeo < 0)
+ current_timeo = 0;
+ }
+ vm_wait = 0;
+ }
+ *timeo_p = current_timeo;
+ }
+do_rm_wq:
+ remove_wait_queue(sk_sleep(sk), &wait);
+ return err;
+do_error:
+ err = -EPIPE;
+ goto do_rm_wq;
+do_nonblock:
+ err = -EAGAIN;
+ goto do_rm_wq;
+do_interrupted:
+ err = sock_intr_errno(*timeo_p);
+ goto do_rm_wq;
}
int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
@@ -952,6 +1024,8 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
copy = mss - skb->len;
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
+ if (!csk_mem_free(cdev, sk))
+ goto wait_for_sndbuf;
if (is_tls_tx(csk) && !csk->tlshws.txleft) {
struct tls_hdr hdr;
@@ -1009,9 +1083,10 @@ new_buf:
int off = TCP_OFF(sk);
bool merge;
- if (page)
- pg_size <<= compound_order(page);
+ if (!page)
+ goto wait_for_memory;
+ pg_size <<= compound_order(page);
if (off < pg_size &&
skb_can_coalesce(skb, i, page, off)) {
merge = 1;
@@ -1099,8 +1174,10 @@ copy:
if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND)
push_frames_if_head(sk);
continue;
+wait_for_sndbuf:
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
- err = sk_stream_wait_memory(sk, &timeo);
+ err = csk_wait_memory(cdev, sk, &timeo);
if (err)
goto do_error;
}
@@ -1131,6 +1208,7 @@ int chtls_sendpage(struct sock *sk, struct page *page,
int offset, size_t size, int flags)
{
struct chtls_sock *csk;
+ struct chtls_dev *cdev;
int mss, err, copied;
struct tcp_sock *tp;
long timeo;
@@ -1138,6 +1216,7 @@ int chtls_sendpage(struct sock *sk, struct page *page,
tp = tcp_sk(sk);
copied = 0;
csk = rcu_dereference_sk_user_data(sk);
+ cdev = csk->cdev;
timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
err = sk_stream_wait_connect(sk, &timeo);
@@ -1152,10 +1231,11 @@ int chtls_sendpage(struct sock *sk, struct page *page,
struct sk_buff *skb = skb_peek_tail(&csk->txq);
int copy, i;
- copy = mss - skb->len;
if (!skb || (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) ||
- copy <= 0) {
+ (copy = mss - skb->len) <= 0) {
new_buf:
+ if (!csk_mem_free(cdev, sk))
+ goto wait_for_sndbuf;
if (is_tls_tx(csk)) {
skb = get_record_skb(sk,
@@ -1167,7 +1247,7 @@ new_buf:
skb = get_tx_skb(sk, 0);
}
if (!skb)
- goto do_error;
+ goto wait_for_memory;
copy = mss;
}
if (copy > size)
@@ -1206,8 +1286,12 @@ new_buf:
if (unlikely(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND))
push_frames_if_head(sk);
continue;
-
+wait_for_sndbuf:
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+wait_for_memory:
+ err = csk_wait_memory(cdev, sk, &timeo);
+ if (err)
+ goto do_error;
}
out:
csk_reset_flag(csk, CSK_TX_MORE_DATA);
@@ -1409,7 +1493,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
break;
chtls_cleanup_rbuf(sk, copied);
sk_wait_data(sk, &timeo, NULL);
- continue;
+ continue;
found_ok_skb:
if (!skb->len) {
skb_dst_set(skb, NULL);
@@ -1449,31 +1533,13 @@ found_ok_skb:
}
}
}
- if (hws->rstate == TLS_RCV_ST_READ_BODY) {
- if (skb_copy_datagram_msg(skb, offset,
- msg, avail)) {
- if (!copied) {
- copied = -EFAULT;
- break;
- }
- }
- } else {
- struct tlsrx_cmp_hdr *tls_hdr_pkt =
- (struct tlsrx_cmp_hdr *)skb->data;
-
- if ((tls_hdr_pkt->res_to_mac_error &
- TLSRX_HDR_PKT_ERROR_M))
- tls_hdr_pkt->type = 0x7F;
-
- /* CMP pld len is for recv seq */
- hws->rcvpld = skb->hdr_len;
- if (skb_copy_datagram_msg(skb, offset, msg, avail)) {
- if (!copied) {
- copied = -EFAULT;
- break;
- }
+ if (skb_copy_datagram_msg(skb, offset, msg, avail)) {
+ if (!copied) {
+ copied = -EFAULT;
+ break;
}
}
+
copied += avail;
len -= avail;
hws->copied_seq += avail;
@@ -1481,32 +1547,20 @@ skip_copy:
if (tp->urg_data && after(tp->copied_seq, tp->urg_seq))
tp->urg_data = 0;
- if (hws->rstate == TLS_RCV_ST_READ_BODY &&
- (avail + offset) >= skb->len) {
+ if ((avail + offset) >= skb->len) {
if (likely(skb))
chtls_free_skb(sk, skb);
buffers_freed++;
- hws->rstate = TLS_RCV_ST_READ_HEADER;
- atomic_inc(&adap->chcr_stats.tls_pdu_rx);
- tp->copied_seq += hws->rcvpld;
+ if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_TLS_HDR) {
+ tp->copied_seq += skb->len;
+ hws->rcvpld = skb->hdr_len;
+ } else {
+ tp->copied_seq += hws->rcvpld;
+ }
hws->copied_seq = 0;
if (copied >= target &&
!skb_peek(&sk->sk_receive_queue))
break;
- } else {
- if (likely(skb)) {
- if (ULP_SKB_CB(skb)->flags &
- ULPCB_FLAG_TLS_ND)
- hws->rstate =
- TLS_RCV_ST_READ_HEADER;
- else
- hws->rstate =
- TLS_RCV_ST_READ_BODY;
- chtls_free_skb(sk, skb);
- }
- buffers_freed++;
- tp->copied_seq += avail;
- hws->copied_seq = 0;
}
} while (len > 0);
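A small sketch (illustrative name) of the driver-private send-buffer gate that chtls_sendmsg() and chtls_sendpage() above consult before queueing data; max_host_sndbuf is the 48 KiB cap set in chtls_uld_add().

static bool example_tx_room(const struct chtls_dev *cdev, const struct sock *sk)
{
	return cdev->max_host_sndbuf - sk->sk_wmem_queued > 0;
}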
diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c
index 007c45c38fc7..9b07f9165658 100644
--- a/drivers/crypto/chelsio/chtls/chtls_main.c
+++ b/drivers/crypto/chelsio/chtls/chtls_main.c
@@ -216,7 +216,6 @@ static void *chtls_uld_add(const struct cxgb4_lld_info *info)
cdev->lldi = lldi;
cdev->pdev = lldi->pdev;
cdev->tids = lldi->tids;
- cdev->ports = (struct net_device **)(cdev + 1);
cdev->ports = lldi->ports;
cdev->mtus = lldi->mtus;
cdev->tids = lldi->tids;
@@ -239,6 +238,7 @@ static void *chtls_uld_add(const struct cxgb4_lld_info *info)
spin_lock_init(&cdev->idr_lock);
cdev->send_page_order = min_t(uint, get_order(32768),
send_page_order);
+ cdev->max_host_sndbuf = 48 * 1024;
if (lldi->vr->key.size)
if (chtls_init_kmap(cdev, lldi))
@@ -250,7 +250,7 @@ static void *chtls_uld_add(const struct cxgb4_lld_info *info)
return cdev;
out_rspq_skb:
- for (j = 0; j <= i; j++)
+ for (j = 0; j < i; j++)
kfree_skb(cdev->rspq_skb_cache[j]);
kfree_skb(cdev->askb);
out_skb:
@@ -441,7 +441,7 @@ nomem:
static int do_chtls_getsockopt(struct sock *sk, char __user *optval,
int __user *optlen)
{
- struct tls_crypto_info crypto_info;
+ struct tls_crypto_info crypto_info = { 0 };
crypto_info.version = TLS_1_2_VERSION;
if (copy_to_user(optval, &crypto_info, sizeof(struct tls_crypto_info)))
@@ -491,9 +491,13 @@ static int do_chtls_setsockopt(struct sock *sk, int optname,
switch (tmp_crypto_info.cipher_type) {
case TLS_CIPHER_AES_GCM_128: {
- rc = copy_from_user(crypto_info, optval,
- sizeof(struct
- tls12_crypto_info_aes_gcm_128));
+ /* Obtain version and type from previous copy */
+ crypto_info[0] = tmp_crypto_info;
+ /* Now copy the following data */
+ rc = copy_from_user((char *)crypto_info + sizeof(*crypto_info),
+ optval + sizeof(*crypto_info),
+ sizeof(struct tls12_crypto_info_aes_gcm_128)
+ - sizeof(*crypto_info));
if (rc) {
rc = -EFAULT;
diff --git a/drivers/crypto/exynos-rng.c b/drivers/crypto/exynos-rng.c
index 86f5f459762e..2cfabb99cb6e 100644
--- a/drivers/crypto/exynos-rng.c
+++ b/drivers/crypto/exynos-rng.c
@@ -319,8 +319,7 @@ static int exynos_rng_remove(struct platform_device *pdev)
static int __maybe_unused exynos_rng_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct exynos_rng_dev *rng = platform_get_drvdata(pdev);
+ struct exynos_rng_dev *rng = dev_get_drvdata(dev);
int ret;
/* If we were never seeded then after resume it will be the same */
@@ -350,8 +349,7 @@ static int __maybe_unused exynos_rng_suspend(struct device *dev)
static int __maybe_unused exynos_rng_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct exynos_rng_dev *rng = platform_get_drvdata(pdev);
+ struct exynos_rng_dev *rng = dev_get_drvdata(dev);
int ret;
/* Never seeded so nothing to do */
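A sketch of the equivalence relied on above: for a bound platform device, dev_get_drvdata() returns the same pointer as the platform_get_drvdata(to_platform_device(dev)) chain, so the PM callbacks can take the shorter path. The function name is illustrative.

static void example_drvdata_equivalence(struct device *dev)
{
	struct exynos_rng_dev *a = dev_get_drvdata(dev);
	struct exynos_rng_dev *b = platform_get_drvdata(to_platform_device(dev));

	WARN_ON(a != b);	/* both resolve to the same driver data */
}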
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index b6be62025325..4e86f864a952 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -20,6 +20,7 @@
#include <linux/platform_device.h>
#include <linux/workqueue.h>
+#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
@@ -352,6 +353,7 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
/* H/W capabilities selection */
val = EIP197_FUNCTION_RSVD;
val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY;
+ val |= EIP197_PROTOCOL_ENCRYPT_HASH | EIP197_PROTOCOL_HASH_DECRYPT;
val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC;
val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1;
val |= EIP197_ALG_SHA2 | EIP197_ALG_HMAC_SHA2;
@@ -537,6 +539,27 @@ finalize:
EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
}
+inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
+ struct safexcel_result_desc *rdesc)
+{
+ if (likely(!rdesc->result_data.error_code))
+ return 0;
+
+ if (rdesc->result_data.error_code & 0x407f) {
+ /* Fatal error (bits 0-7, 14) */
+ dev_err(priv->dev,
+ "cipher: result: result descriptor error (%d)\n",
+ rdesc->result_data.error_code);
+ return -EIO;
+ } else if (rdesc->result_data.error_code == BIT(9)) {
+ /* Authentication failed */
+ return -EBADMSG;
+ }
+
+ /* All other non-fatal errors */
+ return -EINVAL;
+}
+
void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
{
struct safexcel_command_desc *cdesc;
@@ -770,6 +793,9 @@ static struct safexcel_alg_template *safexcel_algs[] = {
&safexcel_alg_hmac_sha1,
&safexcel_alg_hmac_sha224,
&safexcel_alg_hmac_sha256,
+ &safexcel_alg_authenc_hmac_sha1_cbc_aes,
+ &safexcel_alg_authenc_hmac_sha224_cbc_aes,
+ &safexcel_alg_authenc_hmac_sha256_cbc_aes,
};
static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
@@ -781,6 +807,8 @@ static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher);
+ else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
+ ret = crypto_register_aead(&safexcel_algs[i]->alg.aead);
else
ret = crypto_register_ahash(&safexcel_algs[i]->alg.ahash);
@@ -794,6 +822,8 @@ fail:
for (j = 0; j < i; j++) {
if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher);
+ else if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_AEAD)
+ crypto_unregister_aead(&safexcel_algs[j]->alg.aead);
else
crypto_unregister_ahash(&safexcel_algs[j]->alg.ahash);
}
@@ -808,6 +838,8 @@ static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher);
+ else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
+ crypto_unregister_aead(&safexcel_algs[i]->alg.aead);
else
crypto_unregister_ahash(&safexcel_algs[i]->alg.ahash);
}
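
The safexcel_rdesc_check_errors() helper added above maps the hardware's result-descriptor error field onto errno values: anything covered by the 0x407f mask is treated as a fatal I/O failure, bit 9 alone means the inline hash verification failed (-EBADMSG), and any other non-zero code is reported as -EINVAL. A standalone sketch of that classification with a local BIT() macro and made-up sample codes:

#include <errno.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

/* Userspace stand-in mirroring the classification above, not driver code. */
static int classify_result_error(unsigned int error_code)
{
	if (!error_code)
		return 0;                /* success */
	if (error_code & 0x407f)
		return -EIO;             /* fatal per the 0x407f mask */
	if (error_code == BIT(9))
		return -EBADMSG;         /* authentication failed */
	return -EINVAL;                  /* any other non-fatal error */
}

int main(void)
{
	unsigned int samples[] = { 0x0, BIT(9), BIT(3), BIT(14), BIT(10) };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("code 0x%04x -> %d\n", samples[i],
		       classify_result_error(samples[i]));
	return 0;
}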
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index b470a849721f..8b3ee9b59f53 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -11,8 +11,10 @@
#ifndef __SAFEXCEL_H__
#define __SAFEXCEL_H__
+#include <crypto/aead.h>
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
#include <crypto/skcipher.h>
#define EIP197_HIA_VERSION_LE 0xca35
@@ -20,7 +22,7 @@
/* Static configuration */
#define EIP197_DEFAULT_RING_SIZE 400
-#define EIP197_MAX_TOKENS 5
+#define EIP197_MAX_TOKENS 8
#define EIP197_MAX_RINGS 4
#define EIP197_FETCH_COUNT 1
#define EIP197_MAX_BATCH_SZ 64
@@ -28,6 +30,17 @@
#define EIP197_GFP_FLAGS(base) ((base).flags & CRYPTO_TFM_REQ_MAY_SLEEP ? \
GFP_KERNEL : GFP_ATOMIC)
+/* Custom on-stack requests (for invalidation) */
+#define EIP197_SKCIPHER_REQ_SIZE sizeof(struct skcipher_request) + \
+ sizeof(struct safexcel_cipher_req)
+#define EIP197_AHASH_REQ_SIZE sizeof(struct ahash_request) + \
+ sizeof(struct safexcel_ahash_req)
+#define EIP197_AEAD_REQ_SIZE sizeof(struct aead_request) + \
+ sizeof(struct safexcel_cipher_req)
+#define EIP197_REQUEST_ON_STACK(name, type, size) \
+ char __##name##_desc[size] CRYPTO_MINALIGN_ATTR; \
+ struct type##_request *name = (void *)__##name##_desc
+
/* Register base offsets */
#define EIP197_HIA_AIC(priv) ((priv)->base + (priv)->offsets.hia_aic)
#define EIP197_HIA_AIC_G(priv) ((priv)->base + (priv)->offsets.hia_aic_g)
@@ -274,7 +287,7 @@ struct safexcel_context_record {
u32 control0;
u32 control1;
- __le32 data[12];
+ __le32 data[24];
} __packed;
/* control0 */
@@ -286,8 +299,8 @@ struct safexcel_context_record {
#define CONTEXT_CONTROL_TYPE_CRYPTO_IN 0x5
#define CONTEXT_CONTROL_TYPE_ENCRYPT_HASH_OUT 0x6
#define CONTEXT_CONTROL_TYPE_DECRYPT_HASH_IN 0x7
-#define CONTEXT_CONTROL_TYPE_HASH_ENCRYPT_OUT 0x14
-#define CONTEXT_CONTROL_TYPE_HASH_DECRYPT_OUT 0x15
+#define CONTEXT_CONTROL_TYPE_HASH_ENCRYPT_OUT 0xe
+#define CONTEXT_CONTROL_TYPE_HASH_DECRYPT_IN 0xf
#define CONTEXT_CONTROL_RESTART_HASH BIT(4)
#define CONTEXT_CONTROL_NO_FINISH_HASH BIT(5)
#define CONTEXT_CONTROL_SIZE(n) ((n) << 8)
@@ -391,11 +404,15 @@ struct safexcel_token {
u8 opcode:4;
} __packed;
+#define EIP197_TOKEN_HASH_RESULT_VERIFY BIT(16)
+
#define EIP197_TOKEN_STAT_LAST_HASH BIT(0)
#define EIP197_TOKEN_STAT_LAST_PACKET BIT(1)
#define EIP197_TOKEN_OPCODE_DIRECTION 0x0
#define EIP197_TOKEN_OPCODE_INSERT 0x2
#define EIP197_TOKEN_OPCODE_NOOP EIP197_TOKEN_OPCODE_INSERT
+#define EIP197_TOKEN_OPCODE_RETRIEVE 0x4
+#define EIP197_TOKEN_OPCODE_VERIFY 0xd
#define EIP197_TOKEN_OPCODE_BYPASS GENMASK(3, 0)
static inline void eip197_noop_token(struct safexcel_token *token)
@@ -479,6 +496,7 @@ struct safexcel_ring {
enum safexcel_alg_type {
SAFEXCEL_ALG_TYPE_SKCIPHER,
+ SAFEXCEL_ALG_TYPE_AEAD,
SAFEXCEL_ALG_TYPE_AHASH,
};
@@ -581,6 +599,16 @@ struct safexcel_context {
bool exit_inv;
};
+struct safexcel_ahash_export_state {
+ u64 len;
+ u64 processed;
+
+ u32 digest;
+
+ u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];
+ u8 cache[SHA256_BLOCK_SIZE];
+};
+
/*
* Template structure to describe the algorithms in order to register them.
* It also has the purpose to contain our private structure and is actually
@@ -591,6 +619,7 @@ struct safexcel_alg_template {
enum safexcel_alg_type type;
union {
struct skcipher_alg skcipher;
+ struct aead_alg aead;
struct ahash_alg ahash;
} alg;
};
@@ -601,6 +630,8 @@ struct safexcel_inv_result {
};
void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring);
+int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
+ struct safexcel_result_desc *rdesc);
void safexcel_complete(struct safexcel_crypto_priv *priv, int ring);
int safexcel_invalidate_cache(struct crypto_async_request *async,
struct safexcel_crypto_priv *priv,
@@ -625,6 +656,8 @@ struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *pri
bool first, bool last,
dma_addr_t data, u32 len);
void safexcel_inv_complete(struct crypto_async_request *req, int error);
+int safexcel_hmac_setkey(const char *alg, const u8 *key, unsigned int keylen,
+ void *istate, void *ostate);
/* available algorithms */
extern struct safexcel_alg_template safexcel_alg_ecb_aes;
@@ -635,5 +668,8 @@ extern struct safexcel_alg_template safexcel_alg_sha256;
extern struct safexcel_alg_template safexcel_alg_hmac_sha1;
extern struct safexcel_alg_template safexcel_alg_hmac_sha224;
extern struct safexcel_alg_template safexcel_alg_hmac_sha256;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes;
#endif
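
The context-record and control-size changes above fit together: the per-context data area grows from 12 to 24 words because an AEAD context now stores the cipher key plus the precomputed HMAC ipad and opad states, and the cipher code feeds CONTEXT_CONTROL_SIZE() with key_len/4 plus twice state_sz/4. A quick standalone check of that arithmetic (word counts taken from the hunks above; the helper name is illustrative):

#include <stdio.h>

/* An AES key of key_len bytes occupies key_len/4 context words; an AEAD
 * context additionally stores the ipad and opad states (state_sz bytes
 * each). */
static unsigned int ctrl_words(unsigned int key_len, unsigned int state_sz)
{
	return key_len / 4 + 2 * (state_sz / 4);
}

int main(void)
{
	const unsigned int sha256_state = 32;   /* SHA256_DIGEST_SIZE */
	unsigned int key_sizes[] = { 16, 24, 32 };

	for (unsigned i = 0; i < 3; i++)
		printf("AES-%u + HMAC-SHA256 context: %u words\n",
		       key_sizes[i] * 8,
		       ctrl_words(key_sizes[i], sha256_state));

	/* Worst case, AES-256 with SHA-256 pads: 8 + 8 + 8 = 24 words,
	 * matching the data[] growth from 12 to 24 entries. */
	return 0;
}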
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index bafb60505fab..6bb60fda2043 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -12,8 +12,12 @@
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
+#include <crypto/aead.h>
#include <crypto/aes.h>
+#include <crypto/authenc.h>
+#include <crypto/sha.h>
#include <crypto/skcipher.h>
+#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include "safexcel.h"
@@ -28,9 +32,16 @@ struct safexcel_cipher_ctx {
struct safexcel_crypto_priv *priv;
u32 mode;
+ bool aead;
__le32 key[8];
unsigned int key_len;
+
+ /* All the below is AEAD specific */
+ u32 alg;
+ u32 state_sz;
+ u32 ipad[SHA256_DIGEST_SIZE / sizeof(u32)];
+ u32 opad[SHA256_DIGEST_SIZE / sizeof(u32)];
};
struct safexcel_cipher_req {
@@ -38,18 +49,16 @@ struct safexcel_cipher_req {
bool needs_inv;
};
-static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx,
- struct crypto_async_request *async,
- struct safexcel_command_desc *cdesc,
- u32 length)
+static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
+ struct safexcel_command_desc *cdesc,
+ u32 length)
{
- struct skcipher_request *req = skcipher_request_cast(async);
struct safexcel_token *token;
unsigned offset = 0;
if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
offset = AES_BLOCK_SIZE / sizeof(u32);
- memcpy(cdesc->control_data.token, req->iv, AES_BLOCK_SIZE);
+ memcpy(cdesc->control_data.token, iv, AES_BLOCK_SIZE);
cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
}
@@ -65,8 +74,64 @@ static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx,
EIP197_TOKEN_INS_TYPE_OUTPUT;
}
-static int safexcel_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key,
- unsigned int len)
+static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
+ struct safexcel_command_desc *cdesc,
+ enum safexcel_cipher_direction direction,
+ u32 cryptlen, u32 assoclen, u32 digestsize)
+{
+ struct safexcel_token *token;
+ unsigned offset = 0;
+
+ if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
+ offset = AES_BLOCK_SIZE / sizeof(u32);
+ memcpy(cdesc->control_data.token, iv, AES_BLOCK_SIZE);
+
+ cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
+ }
+
+ token = (struct safexcel_token *)(cdesc->control_data.token + offset);
+
+ if (direction == SAFEXCEL_DECRYPT)
+ cryptlen -= digestsize;
+
+ token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+ token[0].packet_length = assoclen;
+ token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH |
+ EIP197_TOKEN_INS_TYPE_OUTPUT;
+
+ token[1].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+ token[1].packet_length = cryptlen;
+ token[1].stat = EIP197_TOKEN_STAT_LAST_HASH;
+ token[1].instructions = EIP197_TOKEN_INS_LAST |
+ EIP197_TOKEN_INS_TYPE_CRYTO |
+ EIP197_TOKEN_INS_TYPE_HASH |
+ EIP197_TOKEN_INS_TYPE_OUTPUT;
+
+ if (direction == SAFEXCEL_ENCRYPT) {
+ token[2].opcode = EIP197_TOKEN_OPCODE_INSERT;
+ token[2].packet_length = digestsize;
+ token[2].stat = EIP197_TOKEN_STAT_LAST_HASH |
+ EIP197_TOKEN_STAT_LAST_PACKET;
+ token[2].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
+ EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
+ } else {
+ token[2].opcode = EIP197_TOKEN_OPCODE_RETRIEVE;
+ token[2].packet_length = digestsize;
+ token[2].stat = EIP197_TOKEN_STAT_LAST_HASH |
+ EIP197_TOKEN_STAT_LAST_PACKET;
+ token[2].instructions = EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
+
+ token[3].opcode = EIP197_TOKEN_OPCODE_VERIFY;
+ token[3].packet_length = digestsize |
+ EIP197_TOKEN_HASH_RESULT_VERIFY;
+ token[3].stat = EIP197_TOKEN_STAT_LAST_HASH |
+ EIP197_TOKEN_STAT_LAST_PACKET;
+ token[3].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT;
+ }
+}
+
+static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm,
+ const u8 *key, unsigned int len)
{
struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -98,41 +163,123 @@ static int safexcel_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key,
return 0;
}
+static int safexcel_aead_aes_setkey(struct crypto_aead *ctfm, const u8 *key,
+ unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct safexcel_ahash_export_state istate, ostate;
+ struct safexcel_crypto_priv *priv = ctx->priv;
+ struct crypto_authenc_keys keys;
+
+ if (crypto_authenc_extractkeys(&keys, key, len) != 0)
+ goto badkey;
+
+ if (keys.enckeylen > sizeof(ctx->key))
+ goto badkey;
+
+ /* Encryption key */
+ if (priv->version == EIP197 && ctx->base.ctxr_dma &&
+ memcmp(ctx->key, keys.enckey, keys.enckeylen))
+ ctx->base.needs_inv = true;
+
+ /* Auth key */
+ switch (ctx->alg) {
+ case CONTEXT_CONTROL_CRYPTO_ALG_SHA1:
+ if (safexcel_hmac_setkey("safexcel-sha1", keys.authkey,
+ keys.authkeylen, &istate, &ostate))
+ goto badkey;
+ break;
+ case CONTEXT_CONTROL_CRYPTO_ALG_SHA224:
+ if (safexcel_hmac_setkey("safexcel-sha224", keys.authkey,
+ keys.authkeylen, &istate, &ostate))
+ goto badkey;
+ break;
+ case CONTEXT_CONTROL_CRYPTO_ALG_SHA256:
+ if (safexcel_hmac_setkey("safexcel-sha256", keys.authkey,
+ keys.authkeylen, &istate, &ostate))
+ goto badkey;
+ break;
+ default:
+ dev_err(priv->dev, "aead: unsupported hash algorithm\n");
+ goto badkey;
+ }
+
+ crypto_aead_set_flags(ctfm, crypto_aead_get_flags(ctfm) &
+ CRYPTO_TFM_RES_MASK);
+
+ if (priv->version == EIP197 && ctx->base.ctxr_dma &&
+ (memcmp(ctx->ipad, istate.state, ctx->state_sz) ||
+ memcmp(ctx->opad, ostate.state, ctx->state_sz)))
+ ctx->base.needs_inv = true;
+
+ /* Now copy the keys into the context */
+ memcpy(ctx->key, keys.enckey, keys.enckeylen);
+ ctx->key_len = keys.enckeylen;
+
+ memcpy(ctx->ipad, &istate.state, ctx->state_sz);
+ memcpy(ctx->opad, &ostate.state, ctx->state_sz);
+
+ memzero_explicit(&keys, sizeof(keys));
+ return 0;
+
+badkey:
+ crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ memzero_explicit(&keys, sizeof(keys));
+ return -EINVAL;
+}
+
static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
struct crypto_async_request *async,
+ struct safexcel_cipher_req *sreq,
struct safexcel_command_desc *cdesc)
{
struct safexcel_crypto_priv *priv = ctx->priv;
- struct skcipher_request *req = skcipher_request_cast(async);
- struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
int ctrl_size;
- if (sreq->direction == SAFEXCEL_ENCRYPT)
+ if (ctx->aead) {
+ if (sreq->direction == SAFEXCEL_ENCRYPT)
+ cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_ENCRYPT_HASH_OUT;
+ else
+ cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_DECRYPT_IN;
+ } else {
cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_OUT;
- else
- cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_IN;
+
+ /* The decryption control type is a combination of the
+ * encryption type and CONTEXT_CONTROL_TYPE_NULL_IN, for all
+ * types.
+ */
+ if (sreq->direction == SAFEXCEL_DECRYPT)
+ cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_NULL_IN;
+ }
cdesc->control_data.control0 |= CONTEXT_CONTROL_KEY_EN;
cdesc->control_data.control1 |= ctx->mode;
+ if (ctx->aead)
+ cdesc->control_data.control0 |= CONTEXT_CONTROL_DIGEST_HMAC |
+ ctx->alg;
+
switch (ctx->key_len) {
case AES_KEYSIZE_128:
cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES128;
- ctrl_size = 4;
break;
case AES_KEYSIZE_192:
cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES192;
- ctrl_size = 6;
break;
case AES_KEYSIZE_256:
cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES256;
- ctrl_size = 8;
break;
default:
dev_err(priv->dev, "aes keysize not supported: %u\n",
ctx->key_len);
return -EINVAL;
}
+
+ ctrl_size = ctx->key_len / sizeof(u32);
+ if (ctx->aead)
+ /* Take into account the ipad+opad digests */
+ ctrl_size += ctx->state_sz / sizeof(u32) * 2;
cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(ctrl_size);
return 0;
@@ -140,9 +287,12 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
struct crypto_async_request *async,
+ struct scatterlist *src,
+ struct scatterlist *dst,
+ unsigned int cryptlen,
+ struct safexcel_cipher_req *sreq,
bool *should_complete, int *ret)
{
- struct skcipher_request *req = skcipher_request_cast(async);
struct safexcel_result_desc *rdesc;
int ndesc = 0;
@@ -158,12 +308,8 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
break;
}
- if (rdesc->result_data.error_code) {
- dev_err(priv->dev,
- "cipher: result: result descriptor error (%d)\n",
- rdesc->result_data.error_code);
- *ret = -EIO;
- }
+ if (likely(!*ret))
+ *ret = safexcel_rdesc_check_errors(priv, rdesc);
ndesc++;
} while (!rdesc->last_seg);
@@ -171,16 +317,16 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
safexcel_complete(priv, ring);
spin_unlock_bh(&priv->ring[ring].egress_lock);
- if (req->src == req->dst) {
- dma_unmap_sg(priv->dev, req->src,
- sg_nents_for_len(req->src, req->cryptlen),
+ if (src == dst) {
+ dma_unmap_sg(priv->dev, src,
+ sg_nents_for_len(src, cryptlen),
DMA_BIDIRECTIONAL);
} else {
- dma_unmap_sg(priv->dev, req->src,
- sg_nents_for_len(req->src, req->cryptlen),
+ dma_unmap_sg(priv->dev, src,
+ sg_nents_for_len(src, cryptlen),
DMA_TO_DEVICE);
- dma_unmap_sg(priv->dev, req->dst,
- sg_nents_for_len(req->dst, req->cryptlen),
+ dma_unmap_sg(priv->dev, dst,
+ sg_nents_for_len(dst, cryptlen),
DMA_FROM_DEVICE);
}
@@ -189,39 +335,43 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
return ndesc;
}
-static int safexcel_aes_send(struct crypto_async_request *async,
- int ring, struct safexcel_request *request,
- int *commands, int *results)
+static int safexcel_aes_send(struct crypto_async_request *base, int ring,
+ struct safexcel_request *request,
+ struct safexcel_cipher_req *sreq,
+ struct scatterlist *src, struct scatterlist *dst,
+ unsigned int cryptlen, unsigned int assoclen,
+ unsigned int digestsize, u8 *iv, int *commands,
+ int *results)
{
- struct skcipher_request *req = skcipher_request_cast(async);
- struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
struct safexcel_crypto_priv *priv = ctx->priv;
struct safexcel_command_desc *cdesc;
struct safexcel_result_desc *rdesc;
struct scatterlist *sg;
- int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = req->cryptlen;
+ unsigned int totlen = cryptlen + assoclen;
+ int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = totlen;
int i, ret = 0;
- if (req->src == req->dst) {
- nr_src = dma_map_sg(priv->dev, req->src,
- sg_nents_for_len(req->src, req->cryptlen),
+ if (src == dst) {
+ nr_src = dma_map_sg(priv->dev, src,
+ sg_nents_for_len(src, totlen),
DMA_BIDIRECTIONAL);
nr_dst = nr_src;
if (!nr_src)
return -EINVAL;
} else {
- nr_src = dma_map_sg(priv->dev, req->src,
- sg_nents_for_len(req->src, req->cryptlen),
+ nr_src = dma_map_sg(priv->dev, src,
+ sg_nents_for_len(src, totlen),
DMA_TO_DEVICE);
if (!nr_src)
return -EINVAL;
- nr_dst = dma_map_sg(priv->dev, req->dst,
- sg_nents_for_len(req->dst, req->cryptlen),
+ nr_dst = dma_map_sg(priv->dev, dst,
+ sg_nents_for_len(dst, totlen),
DMA_FROM_DEVICE);
if (!nr_dst) {
- dma_unmap_sg(priv->dev, req->src,
- sg_nents_for_len(req->src, req->cryptlen),
+ dma_unmap_sg(priv->dev, src,
+ sg_nents_for_len(src, totlen),
DMA_TO_DEVICE);
return -EINVAL;
}
@@ -229,10 +379,17 @@ static int safexcel_aes_send(struct crypto_async_request *async,
memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len);
+ if (ctx->aead) {
+ memcpy(ctx->base.ctxr->data + ctx->key_len / sizeof(u32),
+ ctx->ipad, ctx->state_sz);
+ memcpy(ctx->base.ctxr->data + (ctx->key_len + ctx->state_sz) / sizeof(u32),
+ ctx->opad, ctx->state_sz);
+ }
+
spin_lock_bh(&priv->ring[ring].egress_lock);
/* command descriptors */
- for_each_sg(req->src, sg, nr_src, i) {
+ for_each_sg(src, sg, nr_src, i) {
int len = sg_dma_len(sg);
/* Do not overflow the request */
@@ -240,7 +397,7 @@ static int safexcel_aes_send(struct crypto_async_request *async,
len = queued;
cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc, !(queued - len),
- sg_dma_address(sg), len, req->cryptlen,
+ sg_dma_address(sg), len, totlen,
ctx->base.ctxr_dma);
if (IS_ERR(cdesc)) {
/* No space left in the command descriptor ring */
@@ -250,8 +407,14 @@ static int safexcel_aes_send(struct crypto_async_request *async,
n_cdesc++;
if (n_cdesc == 1) {
- safexcel_context_control(ctx, async, cdesc);
- safexcel_cipher_token(ctx, async, cdesc, req->cryptlen);
+ safexcel_context_control(ctx, base, sreq, cdesc);
+ if (ctx->aead)
+ safexcel_aead_token(ctx, iv, cdesc,
+ sreq->direction, cryptlen,
+ assoclen, digestsize);
+ else
+ safexcel_skcipher_token(ctx, iv, cdesc,
+ cryptlen);
}
queued -= len;
@@ -260,7 +423,7 @@ static int safexcel_aes_send(struct crypto_async_request *async,
}
/* result descriptors */
- for_each_sg(req->dst, sg, nr_dst, i) {
+ for_each_sg(dst, sg, nr_dst, i) {
bool first = !i, last = (i == nr_dst - 1);
u32 len = sg_dma_len(sg);
@@ -276,7 +439,7 @@ static int safexcel_aes_send(struct crypto_async_request *async,
spin_unlock_bh(&priv->ring[ring].egress_lock);
- request->req = &req->base;
+ request->req = base;
*commands = n_cdesc;
*results = n_rdesc;
@@ -291,16 +454,16 @@ cdesc_rollback:
spin_unlock_bh(&priv->ring[ring].egress_lock);
- if (req->src == req->dst) {
- dma_unmap_sg(priv->dev, req->src,
- sg_nents_for_len(req->src, req->cryptlen),
+ if (src == dst) {
+ dma_unmap_sg(priv->dev, src,
+ sg_nents_for_len(src, totlen),
DMA_BIDIRECTIONAL);
} else {
- dma_unmap_sg(priv->dev, req->src,
- sg_nents_for_len(req->src, req->cryptlen),
+ dma_unmap_sg(priv->dev, src,
+ sg_nents_for_len(src, totlen),
DMA_TO_DEVICE);
- dma_unmap_sg(priv->dev, req->dst,
- sg_nents_for_len(req->dst, req->cryptlen),
+ dma_unmap_sg(priv->dev, dst,
+ sg_nents_for_len(dst, totlen),
DMA_FROM_DEVICE);
}
@@ -309,11 +472,10 @@ cdesc_rollback:
static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
int ring,
- struct crypto_async_request *async,
+ struct crypto_async_request *base,
bool *should_complete, int *ret)
{
- struct skcipher_request *req = skcipher_request_cast(async);
- struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
struct safexcel_result_desc *rdesc;
int ndesc = 0, enq_ret;
@@ -354,7 +516,7 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
ctx->base.ring = ring;
spin_lock_bh(&priv->ring[ring].queue_lock);
- enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
+ enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, base);
spin_unlock_bh(&priv->ring[ring].queue_lock);
if (enq_ret != -EINPROGRESS)
@@ -368,9 +530,10 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
return ndesc;
}
-static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
- struct crypto_async_request *async,
- bool *should_complete, int *ret)
+static int safexcel_skcipher_handle_result(struct safexcel_crypto_priv *priv,
+ int ring,
+ struct crypto_async_request *async,
+ bool *should_complete, int *ret)
{
struct skcipher_request *req = skcipher_request_cast(async);
struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
@@ -381,24 +544,48 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
err = safexcel_handle_inv_result(priv, ring, async,
should_complete, ret);
} else {
- err = safexcel_handle_req_result(priv, ring, async,
+ err = safexcel_handle_req_result(priv, ring, async, req->src,
+ req->dst, req->cryptlen, sreq,
should_complete, ret);
}
return err;
}
-static int safexcel_cipher_send_inv(struct crypto_async_request *async,
+static int safexcel_aead_handle_result(struct safexcel_crypto_priv *priv,
+ int ring,
+ struct crypto_async_request *async,
+ bool *should_complete, int *ret)
+{
+ struct aead_request *req = aead_request_cast(async);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct safexcel_cipher_req *sreq = aead_request_ctx(req);
+ int err;
+
+ if (sreq->needs_inv) {
+ sreq->needs_inv = false;
+ err = safexcel_handle_inv_result(priv, ring, async,
+ should_complete, ret);
+ } else {
+ err = safexcel_handle_req_result(priv, ring, async, req->src,
+ req->dst,
+ req->cryptlen + crypto_aead_authsize(tfm),
+ sreq, should_complete, ret);
+ }
+
+ return err;
+}
+
+static int safexcel_cipher_send_inv(struct crypto_async_request *base,
int ring, struct safexcel_request *request,
int *commands, int *results)
{
- struct skcipher_request *req = skcipher_request_cast(async);
- struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
struct safexcel_crypto_priv *priv = ctx->priv;
int ret;
- ret = safexcel_invalidate_cache(async, priv,
- ctx->base.ctxr_dma, ring, request);
+ ret = safexcel_invalidate_cache(base, priv, ctx->base.ctxr_dma, ring,
+ request);
if (unlikely(ret))
return ret;
@@ -408,9 +595,9 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *async,
return 0;
}
-static int safexcel_send(struct crypto_async_request *async,
- int ring, struct safexcel_request *request,
- int *commands, int *results)
+static int safexcel_skcipher_send(struct crypto_async_request *async, int ring,
+ struct safexcel_request *request,
+ int *commands, int *results)
{
struct skcipher_request *req = skcipher_request_cast(async);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
@@ -421,59 +608,108 @@ static int safexcel_send(struct crypto_async_request *async,
BUG_ON(priv->version == EIP97 && sreq->needs_inv);
if (sreq->needs_inv)
- ret = safexcel_cipher_send_inv(async, ring, request,
- commands, results);
+ ret = safexcel_cipher_send_inv(async, ring, request, commands,
+ results);
+ else
+ ret = safexcel_aes_send(async, ring, request, sreq, req->src,
+ req->dst, req->cryptlen, 0, 0, req->iv,
+ commands, results);
+ return ret;
+}
+
+static int safexcel_aead_send(struct crypto_async_request *async, int ring,
+ struct safexcel_request *request, int *commands,
+ int *results)
+{
+ struct aead_request *req = aead_request_cast(async);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct safexcel_cipher_req *sreq = aead_request_ctx(req);
+ struct safexcel_crypto_priv *priv = ctx->priv;
+ int ret;
+
+ BUG_ON(priv->version == EIP97 && sreq->needs_inv);
+
+ if (sreq->needs_inv)
+ ret = safexcel_cipher_send_inv(async, ring, request, commands,
+ results);
else
- ret = safexcel_aes_send(async, ring, request,
+ ret = safexcel_aes_send(async, ring, request, sreq, req->src,
+ req->dst, req->cryptlen, req->assoclen,
+ crypto_aead_authsize(tfm), req->iv,
commands, results);
return ret;
}
-static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
+static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm,
+ struct crypto_async_request *base,
+ struct safexcel_cipher_req *sreq,
+ struct safexcel_inv_result *result)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct safexcel_crypto_priv *priv = ctx->priv;
- SKCIPHER_REQUEST_ON_STACK(req, __crypto_skcipher_cast(tfm));
- struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
- struct safexcel_inv_result result = {};
int ring = ctx->base.ring;
- memset(req, 0, sizeof(struct skcipher_request));
+ init_completion(&result->completion);
- /* create invalidation request */
- init_completion(&result.completion);
- skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- safexcel_inv_complete, &result);
-
- skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm));
- ctx = crypto_tfm_ctx(req->base.tfm);
+ ctx = crypto_tfm_ctx(base->tfm);
ctx->base.exit_inv = true;
sreq->needs_inv = true;
spin_lock_bh(&priv->ring[ring].queue_lock);
- crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
+ crypto_enqueue_request(&priv->ring[ring].queue, base);
spin_unlock_bh(&priv->ring[ring].queue_lock);
queue_work(priv->ring[ring].workqueue,
&priv->ring[ring].work_data.work);
- wait_for_completion(&result.completion);
+ wait_for_completion(&result->completion);
- if (result.error) {
+ if (result->error) {
dev_warn(priv->dev,
"cipher: sync: invalidate: completion error %d\n",
- result.error);
- return result.error;
+ result->error);
+ return result->error;
}
return 0;
}
-static int safexcel_aes(struct skcipher_request *req,
- enum safexcel_cipher_direction dir, u32 mode)
+static int safexcel_skcipher_exit_inv(struct crypto_tfm *tfm)
{
- struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ EIP197_REQUEST_ON_STACK(req, skcipher, EIP197_SKCIPHER_REQ_SIZE);
struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
+ struct safexcel_inv_result result = {};
+
+ memset(req, 0, sizeof(struct skcipher_request));
+
+ skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ safexcel_inv_complete, &result);
+ skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm));
+
+ return safexcel_cipher_exit_inv(tfm, &req->base, sreq, &result);
+}
+
+static int safexcel_aead_exit_inv(struct crypto_tfm *tfm)
+{
+ EIP197_REQUEST_ON_STACK(req, aead, EIP197_AEAD_REQ_SIZE);
+ struct safexcel_cipher_req *sreq = aead_request_ctx(req);
+ struct safexcel_inv_result result = {};
+
+ memset(req, 0, sizeof(struct aead_request));
+
+ aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ safexcel_inv_complete, &result);
+ aead_request_set_tfm(req, __crypto_aead_cast(tfm));
+
+ return safexcel_cipher_exit_inv(tfm, &req->base, sreq, &result);
+}
+
+static int safexcel_aes(struct crypto_async_request *base,
+ struct safexcel_cipher_req *sreq,
+ enum safexcel_cipher_direction dir, u32 mode)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
struct safexcel_crypto_priv *priv = ctx->priv;
int ret, ring;
@@ -489,7 +725,7 @@ static int safexcel_aes(struct skcipher_request *req,
} else {
ctx->base.ring = safexcel_select_ring(priv);
ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
- EIP197_GFP_FLAGS(req->base),
+ EIP197_GFP_FLAGS(*base),
&ctx->base.ctxr_dma);
if (!ctx->base.ctxr)
return -ENOMEM;
@@ -498,7 +734,7 @@ static int safexcel_aes(struct skcipher_request *req,
ring = ctx->base.ring;
spin_lock_bh(&priv->ring[ring].queue_lock);
- ret = crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
+ ret = crypto_enqueue_request(&priv->ring[ring].queue, base);
spin_unlock_bh(&priv->ring[ring].queue_lock);
queue_work(priv->ring[ring].workqueue,
@@ -509,14 +745,14 @@ static int safexcel_aes(struct skcipher_request *req,
static int safexcel_ecb_aes_encrypt(struct skcipher_request *req)
{
- return safexcel_aes(req, SAFEXCEL_ENCRYPT,
- CONTEXT_CONTROL_CRYPTO_MODE_ECB);
+ return safexcel_aes(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB);
}
static int safexcel_ecb_aes_decrypt(struct skcipher_request *req)
{
- return safexcel_aes(req, SAFEXCEL_DECRYPT,
- CONTEXT_CONTROL_CRYPTO_MODE_ECB);
+ return safexcel_aes(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB);
}
static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
@@ -526,34 +762,64 @@ static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
container_of(tfm->__crt_alg, struct safexcel_alg_template,
alg.skcipher.base);
- ctx->priv = tmpl->priv;
- ctx->base.send = safexcel_send;
- ctx->base.handle_result = safexcel_handle_result;
-
crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
sizeof(struct safexcel_cipher_req));
+ ctx->priv = tmpl->priv;
+
+ ctx->base.send = safexcel_skcipher_send;
+ ctx->base.handle_result = safexcel_skcipher_handle_result;
return 0;
}
-static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm)
+static int safexcel_cipher_cra_exit(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct safexcel_crypto_priv *priv = ctx->priv;
- int ret;
- memzero_explicit(ctx->key, 8 * sizeof(u32));
+ memzero_explicit(ctx->key, sizeof(ctx->key));
/* context not allocated, skip invalidation */
if (!ctx->base.ctxr)
+ return -ENOMEM;
+
+ memzero_explicit(ctx->base.ctxr->data, sizeof(ctx->base.ctxr->data));
+ return 0;
+}
+
+static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct safexcel_crypto_priv *priv = ctx->priv;
+ int ret;
+
+ if (safexcel_cipher_cra_exit(tfm))
return;
- memzero_explicit(ctx->base.ctxr->data, 8 * sizeof(u32));
+ if (priv->version == EIP197) {
+ ret = safexcel_skcipher_exit_inv(tfm);
+ if (ret)
+ dev_warn(priv->dev, "skcipher: invalidation error %d\n",
+ ret);
+ } else {
+ dma_pool_free(priv->context_pool, ctx->base.ctxr,
+ ctx->base.ctxr_dma);
+ }
+}
+
+static void safexcel_aead_cra_exit(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct safexcel_crypto_priv *priv = ctx->priv;
+ int ret;
+
+ if (safexcel_cipher_cra_exit(tfm))
+ return;
if (priv->version == EIP197) {
- ret = safexcel_cipher_exit_inv(tfm);
+ ret = safexcel_aead_exit_inv(tfm);
if (ret)
- dev_warn(priv->dev, "cipher: invalidation error %d\n", ret);
+ dev_warn(priv->dev, "aead: invalidation error %d\n",
+ ret);
} else {
dma_pool_free(priv->context_pool, ctx->base.ctxr,
ctx->base.ctxr_dma);
@@ -563,7 +829,7 @@ static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm)
struct safexcel_alg_template safexcel_alg_ecb_aes = {
.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
.alg.skcipher = {
- .setkey = safexcel_aes_setkey,
+ .setkey = safexcel_skcipher_aes_setkey,
.encrypt = safexcel_ecb_aes_encrypt,
.decrypt = safexcel_ecb_aes_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -586,20 +852,20 @@ struct safexcel_alg_template safexcel_alg_ecb_aes = {
static int safexcel_cbc_aes_encrypt(struct skcipher_request *req)
{
- return safexcel_aes(req, SAFEXCEL_ENCRYPT,
- CONTEXT_CONTROL_CRYPTO_MODE_CBC);
+ return safexcel_aes(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC);
}
static int safexcel_cbc_aes_decrypt(struct skcipher_request *req)
{
- return safexcel_aes(req, SAFEXCEL_DECRYPT,
- CONTEXT_CONTROL_CRYPTO_MODE_CBC);
+ return safexcel_aes(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC);
}
struct safexcel_alg_template safexcel_alg_cbc_aes = {
.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
.alg.skcipher = {
- .setkey = safexcel_aes_setkey,
+ .setkey = safexcel_skcipher_aes_setkey,
.encrypt = safexcel_cbc_aes_encrypt,
.decrypt = safexcel_cbc_aes_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -620,3 +886,139 @@ struct safexcel_alg_template safexcel_alg_cbc_aes = {
},
},
};
+
+static int safexcel_aead_encrypt(struct aead_request *req)
+{
+ struct safexcel_cipher_req *creq = aead_request_ctx(req);
+
+ return safexcel_aes(&req->base, creq, SAFEXCEL_ENCRYPT,
+ CONTEXT_CONTROL_CRYPTO_MODE_CBC);
+}
+
+static int safexcel_aead_decrypt(struct aead_request *req)
+{
+ struct safexcel_cipher_req *creq = aead_request_ctx(req);
+
+ return safexcel_aes(&req->base, creq, SAFEXCEL_DECRYPT,
+ CONTEXT_CONTROL_CRYPTO_MODE_CBC);
+}
+
+static int safexcel_aead_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct safexcel_alg_template *tmpl =
+ container_of(tfm->__crt_alg, struct safexcel_alg_template,
+ alg.aead.base);
+
+ crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
+ sizeof(struct safexcel_cipher_req));
+
+ ctx->priv = tmpl->priv;
+
+ ctx->aead = true;
+ ctx->base.send = safexcel_aead_send;
+ ctx->base.handle_result = safexcel_aead_handle_result;
+ return 0;
+}
+
+static int safexcel_aead_sha1_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_cra_init(tfm);
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
+ ctx->state_sz = SHA1_DIGEST_SIZE;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .alg.aead = {
+ .setkey = safexcel_aead_aes_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sha1_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_aead_sha256_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_cra_init(tfm);
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
+ ctx->state_sz = SHA256_DIGEST_SIZE;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .alg.aead = {
+ .setkey = safexcel_aead_aes_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sha256_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_aead_sha224_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_cra_init(tfm);
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
+ ctx->state_sz = SHA256_DIGEST_SIZE;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .alg.aead = {
+ .setkey = safexcel_aead_aes_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha224),cbc(aes))",
+ .cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sha224_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
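
The AEAD paths above subtract digestsize from cryptlen on decryption before building the cipher token (the tag is retrieved and verified rather than decrypted), while on encryption an extra digestsize bytes of hash output are inserted after the ciphertext. A small sketch of the resulting length bookkeeping, assuming the usual convention that a decrypt request passes plaintext-plus-tag length as cryptlen; the helper names are illustrative:

#include <stdio.h>

struct aead_lengths {
	unsigned int cipher_bytes;   /* bytes run through the cipher */
	unsigned int output_bytes;   /* payload written back (excl. assoc) */
};

static struct aead_lengths aead_encrypt_lengths(unsigned int cryptlen,
						unsigned int digestsize)
{
	return (struct aead_lengths){
		.cipher_bytes = cryptlen,
		.output_bytes = cryptlen + digestsize,  /* ciphertext + tag */
	};
}

static struct aead_lengths aead_decrypt_lengths(unsigned int cryptlen,
						unsigned int digestsize)
{
	return (struct aead_lengths){
		.cipher_bytes = cryptlen - digestsize,  /* strip the tag */
		.output_bytes = cryptlen - digestsize,  /* plaintext only */
	};
}

int main(void)
{
	struct aead_lengths enc = aead_encrypt_lengths(256, 20);
	struct aead_lengths dec = aead_decrypt_lengths(256 + 20, 20);

	printf("encrypt: cipher %u bytes, output %u bytes\n",
	       enc.cipher_bytes, enc.output_bytes);
	printf("decrypt: cipher %u bytes, output %u bytes\n",
	       dec.cipher_bytes, dec.output_bytes);
	return 0;
}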
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 317b9e480312..d138d6b8fec5 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -50,16 +50,6 @@ struct safexcel_ahash_req {
u8 cache_next[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
};
-struct safexcel_ahash_export_state {
- u64 len;
- u64 processed;
-
- u32 digest;
-
- u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];
- u8 cache[SHA256_BLOCK_SIZE];
-};
-
static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
u32 input_length, u32 result_length)
{
@@ -146,11 +136,8 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
dev_err(priv->dev,
"hash: result: could not retrieve the result descriptor\n");
*ret = PTR_ERR(rdesc);
- } else if (rdesc->result_data.error_code) {
- dev_err(priv->dev,
- "hash: result: result descriptor error (%d)\n",
- rdesc->result_data.error_code);
- *ret = -EINVAL;
+ } else {
+ *ret = safexcel_rdesc_check_errors(priv, rdesc);
}
safexcel_complete(priv, ring);
@@ -480,7 +467,7 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
struct safexcel_crypto_priv *priv = ctx->priv;
- AHASH_REQUEST_ON_STACK(req, __crypto_ahash_cast(tfm));
+ EIP197_REQUEST_ON_STACK(req, ahash, EIP197_AHASH_REQ_SIZE);
struct safexcel_ahash_req *rctx = ahash_request_ctx(req);
struct safexcel_inv_result result = {};
int ring = ctx->base.ring;
@@ -912,8 +899,8 @@ static int safexcel_hmac_init_iv(struct ahash_request *areq,
return crypto_ahash_export(areq, state);
}
-static int safexcel_hmac_setkey(const char *alg, const u8 *key,
- unsigned int keylen, void *istate, void *ostate)
+int safexcel_hmac_setkey(const char *alg, const u8 *key, unsigned int keylen,
+ void *istate, void *ostate)
{
struct ahash_request *areq;
struct crypto_ahash *tfm;
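
safexcel_hmac_setkey() loses its static qualifier here so the AEAD setkey path in safexcel_cipher.c can call it to precompute the HMAC inner and outer states written into the context record. As a reminder of what that precomputation involves, a userspace sketch that builds the two padded key blocks (key XOR ipad, key XOR opad); hashing one block of each and exporting the midstate, as the driver does, is left out to keep it short:

#include <stdio.h>
#include <string.h>

#define HMAC_BLOCK_SIZE 64   /* SHA-1/SHA-224/SHA-256 block size */

/* Illustrative helper, not driver code.  A key longer than the block
 * size would first be hashed down; that step is omitted here. */
static void hmac_pad_blocks(const unsigned char *key, size_t keylen,
			    unsigned char ipad[HMAC_BLOCK_SIZE],
			    unsigned char opad[HMAC_BLOCK_SIZE])
{
	memset(ipad, 0, HMAC_BLOCK_SIZE);
	memcpy(ipad, key, keylen);
	memcpy(opad, ipad, HMAC_BLOCK_SIZE);

	for (int i = 0; i < HMAC_BLOCK_SIZE; i++) {
		ipad[i] ^= 0x36;
		opad[i] ^= 0x5c;
	}
}

int main(void)
{
	const unsigned char key[] = "example-hmac-key";
	unsigned char ipad[HMAC_BLOCK_SIZE], opad[HMAC_BLOCK_SIZE];

	hmac_pad_blocks(key, sizeof(key) - 1, ipad, opad);
	printf("ipad[0]=0x%02x opad[0]=0x%02x\n", ipad[0], opad[0]);
	return 0;
}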
diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c
index 1e87637c412d..36afd6d8753c 100644
--- a/drivers/crypto/nx/nx-842-powernv.c
+++ b/drivers/crypto/nx/nx-842-powernv.c
@@ -334,7 +334,7 @@ static int wait_for_csb(struct nx842_workmem *wmem,
return -EPROTO;
case CSB_CC_SEQUENCE:
/* should not happen, we don't use chained CRBs */
- CSB_ERR(csb, "CRB seqeunce number error");
+ CSB_ERR(csb, "CRB sequence number error");
return -EPROTO;
case CSB_CC_UNKNOWN_CODE:
CSB_ERR(csb, "Unknown subfunction code");
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index ad02aa63b519..d1a1c74fb56a 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -1087,7 +1087,7 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
if (test_bit(FLAGS_SGS_COPIED, &dd->flags))
free_pages((unsigned long)sg_virt(ctx->sg),
- get_order(ctx->sg->length));
+ get_order(ctx->sg->length + ctx->bufcnt));
if (test_bit(FLAGS_SGS_ALLOCED, &dd->flags))
kfree(ctx->sg);
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index a4df966adbf6..321d5e2ac833 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -1169,8 +1169,7 @@ static void spacc_spacc_complete(unsigned long data)
#ifdef CONFIG_PM
static int spacc_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct spacc_engine *engine = platform_get_drvdata(pdev);
+ struct spacc_engine *engine = dev_get_drvdata(dev);
/*
* We only support standby mode. All we have to do is gate the clock to
@@ -1184,8 +1183,7 @@ static int spacc_suspend(struct device *dev)
static int spacc_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct spacc_engine *engine = platform_get_drvdata(pdev);
+ struct spacc_engine *engine = dev_get_drvdata(dev);
return clk_enable(engine->clk);
}
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
index f172171668ee..ba197f34c252 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
@@ -329,5 +329,7 @@ module_exit(adfdrv_release);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
+MODULE_FIRMWARE(ADF_C3XXX_FW);
+MODULE_FIRMWARE(ADF_C3XXX_MMP);
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);
diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c
index 58a984c9c3ec..59a5a0df50b6 100644
--- a/drivers/crypto/qat/qat_c62x/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62x/adf_drv.c
@@ -329,5 +329,7 @@ module_exit(adfdrv_release);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
+MODULE_FIRMWARE(ADF_C62X_FW);
+MODULE_FIRMWARE(ADF_C62X_MMP);
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
index 2ce01f010c74..be5c5a988ca5 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
@@ -332,5 +332,6 @@ module_exit(adfdrv_release);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
MODULE_FIRMWARE(ADF_DH895XCC_FW);
+MODULE_FIRMWARE(ADF_DH895XCC_MMP);
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);
diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c
index 96072b9b55c4..d7316f7a3a69 100644
--- a/drivers/crypto/vmx/aes.c
+++ b/drivers/crypto/vmx/aes.c
@@ -48,8 +48,6 @@ static int p8_aes_init(struct crypto_tfm *tfm)
alg, PTR_ERR(fallback));
return PTR_ERR(fallback);
}
- printk(KERN_INFO "Using '%s' as fallback implementation.\n",
- crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback));
crypto_cipher_set_flags(fallback,
crypto_cipher_get_flags((struct
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
index 7394d35d5936..5285ece4f33a 100644
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -52,9 +52,6 @@ static int p8_aes_cbc_init(struct crypto_tfm *tfm)
alg, PTR_ERR(fallback));
return PTR_ERR(fallback);
}
- printk(KERN_INFO "Using '%s' as fallback implementation.\n",
- crypto_skcipher_driver_name(fallback));
-
crypto_skcipher_set_flags(
fallback,
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
index fc60d00a2e84..cd777c75291d 100644
--- a/drivers/crypto/vmx/aes_ctr.c
+++ b/drivers/crypto/vmx/aes_ctr.c
@@ -50,8 +50,6 @@ static int p8_aes_ctr_init(struct crypto_tfm *tfm)
alg, PTR_ERR(fallback));
return PTR_ERR(fallback);
}
- printk(KERN_INFO "Using '%s' as fallback implementation.\n",
- crypto_skcipher_driver_name(fallback));
crypto_skcipher_set_flags(
fallback,
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
index 8cd6e62e4c90..8bd9aff0f55f 100644
--- a/drivers/crypto/vmx/aes_xts.c
+++ b/drivers/crypto/vmx/aes_xts.c
@@ -53,8 +53,6 @@ static int p8_aes_xts_init(struct crypto_tfm *tfm)
alg, PTR_ERR(fallback));
return PTR_ERR(fallback);
}
- printk(KERN_INFO "Using '%s' as fallback implementation.\n",
- crypto_skcipher_driver_name(fallback));
crypto_skcipher_set_flags(
fallback,
diff --git a/drivers/crypto/vmx/aesp8-ppc.pl b/drivers/crypto/vmx/aesp8-ppc.pl
index 0b4a293b8a1e..d6a9f63d65ba 100644
--- a/drivers/crypto/vmx/aesp8-ppc.pl
+++ b/drivers/crypto/vmx/aesp8-ppc.pl
@@ -1,12 +1,51 @@
#! /usr/bin/env perl
-# Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
+# SPDX-License-Identifier: GPL-2.0
+
+# This code is taken from CRYPTOGAMs[1] and is included here using the option
+# in the license to distribute the code under the GPL. Therefore this program
+# is free software; you can redistribute it and/or modify it under the terms of
+# the GNU General Public License version 2 as published by the Free Software
+# Foundation.
#
-# Licensed under the OpenSSL license (the "License"). You may not use
-# this file except in compliance with the License. You can obtain a copy
-# in the file LICENSE in the source distribution or at
-# https://www.openssl.org/source/license.html
+# [1] https://www.openssl.org/~appro/cryptogams/
+# Copyright (c) 2006-2017, CRYPTOGAMS by <appro@openssl.org>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain copyright notices,
+# this list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials
+# provided with the distribution.
#
+# * Neither the name of the CRYPTOGAMS nor the names of its
+# copyright holder and contributors may be used to endorse or
+# promote products derived from this software without specific
+# prior written permission.
+#
+# ALTERNATIVELY, provided that this notice is retained in full, this
+# product may be distributed under the terms of the GNU General Public
+# License (GPL), in which case the provisions of the GPL apply INSTEAD OF
+# those given above.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
index 27a94a119009..1c4b5b889fba 100644
--- a/drivers/crypto/vmx/ghash.c
+++ b/drivers/crypto/vmx/ghash.c
@@ -64,8 +64,6 @@ static int p8_ghash_init_tfm(struct crypto_tfm *tfm)
alg, PTR_ERR(fallback));
return PTR_ERR(fallback);
}
- printk(KERN_INFO "Using '%s' as fallback implementation.\n",
- crypto_tfm_alg_driver_name(crypto_shash_tfm(fallback)));
crypto_shash_set_flags(fallback,
crypto_shash_get_flags((struct crypto_shash
diff --git a/drivers/crypto/vmx/ghashp8-ppc.pl b/drivers/crypto/vmx/ghashp8-ppc.pl
index d8429cb71f02..f746af271460 100644
--- a/drivers/crypto/vmx/ghashp8-ppc.pl
+++ b/drivers/crypto/vmx/ghashp8-ppc.pl
@@ -1,5 +1,14 @@
#!/usr/bin/env perl
+# SPDX-License-Identifier: GPL-2.0
+
+# This code is taken from the OpenSSL project but the author (Andy Polyakov)
+# has relicensed it under the GPLv2. Therefore this program is free software;
+# you can redistribute it and/or modify it under the terms of the GNU General
+# Public License version 2 as published by the Free Software Foundation.
#
+# The original headers, including the original license headers, are
+# included below for completeness.
+
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and