Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/Kconfig        |  18
-rw-r--r--  drivers/crypto/ccp/sev-dev.c  |   2
-rw-r--r--  drivers/crypto/mxs-dcp.c      | 104
3 files changed, 95 insertions(+), 29 deletions(-)
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index bb27690f8f7c..94f23c6fc93b 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -67,6 +67,7 @@ config CRYPTO_DEV_GEODE
config ZCRYPT
tristate "Support for s390 cryptographic adapters"
depends on S390
+ depends on AP
select HW_RANDOM
help
Select this option if you want to enable support for
@@ -74,23 +75,6 @@ config ZCRYPT
to 8 in Coprocessor (CEXxC), EP11 Coprocessor (CEXxP)
or Accelerator (CEXxA) mode.
-config ZCRYPT_DEBUG
- bool "Enable debug features for s390 cryptographic adapters"
- default n
- depends on DEBUG_KERNEL
- depends on ZCRYPT
- help
- Say 'Y' here to enable some additional debug features on the
- s390 cryptographic adapters driver.
-
- There will be some more sysfs attributes displayed for ap cards
- and queues and some flags on crypto requests are interpreted as
- debugging messages to force error injection.
-
- Do not enable on production level kernel build.
-
- If unsure, say N.
-
config PKEY
tristate "Kernel API for protected key handling"
depends on S390
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index f44efbb89c34..2102377f727b 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -1090,7 +1090,7 @@ static int __sev_snp_init_locked(int *error)
void *arg = &data;
int cmd, rc = 0;
- if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+ if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
return -ENODEV;
sev = psp->sev_data;
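
The sev-dev change above swaps a raw CPU-feature test for the confidential-computing attribute API, so the "host has SEV-SNP enabled" decision lives behind cc_platform_has(). A minimal sketch of the same guard pattern, assuming only <linux/cc_platform.h>; the helper name is made up for illustration:

#include <linux/cc_platform.h>
#include <linux/errno.h>

/* Illustrative helper: refuse to proceed unless the host reports
 * SEV-SNP support, mirroring the guard in __sev_snp_init_locked().
 */
static int snp_host_precheck(void)
{
	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
		return -ENODEV;

	return 0;
}
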
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index 2b3ebe0db3a6..057d73c370b7 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -15,6 +15,7 @@
#include <linux/platform_device.h>
#include <linux/stmp_device.h>
#include <linux/clk.h>
+#include <soc/fsl/dcp.h>
#include <crypto/aes.h>
#include <crypto/sha1.h>
@@ -101,6 +102,7 @@ struct dcp_async_ctx {
struct crypto_skcipher *fallback;
unsigned int key_len;
uint8_t key[AES_KEYSIZE_128];
+ bool key_referenced;
};
struct dcp_aes_req_ctx {
@@ -155,6 +157,7 @@ static struct dcp *global_sdcp;
#define MXS_DCP_CONTROL0_HASH_TERM (1 << 13)
#define MXS_DCP_CONTROL0_HASH_INIT (1 << 12)
#define MXS_DCP_CONTROL0_PAYLOAD_KEY (1 << 11)
+#define MXS_DCP_CONTROL0_OTP_KEY (1 << 10)
#define MXS_DCP_CONTROL0_CIPHER_ENCRYPT (1 << 8)
#define MXS_DCP_CONTROL0_CIPHER_INIT (1 << 9)
#define MXS_DCP_CONTROL0_ENABLE_HASH (1 << 6)
@@ -168,6 +171,8 @@ static struct dcp *global_sdcp;
#define MXS_DCP_CONTROL1_CIPHER_MODE_ECB (0 << 4)
#define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128 (0 << 0)
+#define MXS_DCP_CONTROL1_KEY_SELECT_SHIFT 8
+
static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
{
int dma_err;
@@ -224,13 +229,16 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
struct dcp *sdcp = global_sdcp;
struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
+ bool key_referenced = actx->key_referenced;
int ret;
- key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
- 2 * AES_KEYSIZE_128, DMA_TO_DEVICE);
- ret = dma_mapping_error(sdcp->dev, key_phys);
- if (ret)
- return ret;
+ if (!key_referenced) {
+ key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
+ 2 * AES_KEYSIZE_128, DMA_TO_DEVICE);
+ ret = dma_mapping_error(sdcp->dev, key_phys);
+ if (ret)
+ return ret;
+ }
src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
DCP_BUF_SZ, DMA_TO_DEVICE);
@@ -255,8 +263,12 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
MXS_DCP_CONTROL0_INTERRUPT |
MXS_DCP_CONTROL0_ENABLE_CIPHER;
- /* Payload contains the key. */
- desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;
+ if (key_referenced)
+ /* Set OTP key bit to select the key via KEY_SELECT. */
+ desc->control0 |= MXS_DCP_CONTROL0_OTP_KEY;
+ else
+ /* Payload contains the key. */
+ desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;
if (rctx->enc)
desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
@@ -270,6 +282,9 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
else
desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;
+ if (key_referenced)
+ desc->control1 |= sdcp->coh->aes_key[0] << MXS_DCP_CONTROL1_KEY_SELECT_SHIFT;
+
desc->next_cmd_addr = 0;
desc->source = src_phys;
desc->destination = dst_phys;
@@ -284,9 +299,9 @@ aes_done_run:
err_dst:
dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
err_src:
- dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
- DMA_TO_DEVICE);
-
+ if (!key_referenced)
+ dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
+ DMA_TO_DEVICE);
return ret;
}
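
Condensing the descriptor changes in mxs_dcp_run_aes(): with a referenced key no key material travels in the payload, so CONTROL0 selects the OTP/key-slot path instead of PAYLOAD_KEY, and CONTROL1 carries the slot number in its KEY_SELECT field. A sketch of just that selection, reusing this file's macros and struct dcp_dma_desc; the helper name is invented for illustration:

/* Illustrative condensation of the key-selection logic above. "slot"
 * is the one-byte selector kept in aes_key[0] when key_referenced is set.
 */
static void dcp_desc_select_key(struct dcp_dma_desc *desc,
				bool key_referenced, u8 slot)
{
	if (key_referenced) {
		/* Use a hardware key slot; no key bytes in the payload. */
		desc->control0 |= MXS_DCP_CONTROL0_OTP_KEY;
		desc->control1 |= slot << MXS_DCP_CONTROL1_KEY_SELECT_SHIFT;
	} else {
		/* Key material is part of the descriptor payload. */
		desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;
	}
}
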
@@ -453,7 +468,7 @@ static int mxs_dcp_aes_enqueue(struct skcipher_request *req, int enc, int ecb)
struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
int ret;
- if (unlikely(actx->key_len != AES_KEYSIZE_128))
+ if (unlikely(actx->key_len != AES_KEYSIZE_128 && !actx->key_referenced))
return mxs_dcp_block_fallback(req, enc);
rctx->enc = enc;
@@ -500,6 +515,7 @@ static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
* there can still be an operation in progress.
*/
actx->key_len = len;
+ actx->key_referenced = false;
if (len == AES_KEYSIZE_128) {
memcpy(actx->key, key, len);
return 0;
@@ -516,6 +532,32 @@ static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
return crypto_skcipher_setkey(actx->fallback, key, len);
}
+static int mxs_dcp_aes_setrefkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int len)
+{
+ struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
+
+ if (len != DCP_PAES_KEYSIZE)
+ return -EINVAL;
+
+ switch (key[0]) {
+ case DCP_PAES_KEY_SLOT0:
+ case DCP_PAES_KEY_SLOT1:
+ case DCP_PAES_KEY_SLOT2:
+ case DCP_PAES_KEY_SLOT3:
+ case DCP_PAES_KEY_UNIQUE:
+ case DCP_PAES_KEY_OTP:
+ memcpy(actx->key, key, len);
+ actx->key_len = len;
+ actx->key_referenced = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm)
{
const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));
@@ -539,6 +581,13 @@ static void mxs_dcp_aes_fallback_exit_tfm(struct crypto_skcipher *tfm)
crypto_free_skcipher(actx->fallback);
}
+static int mxs_dcp_paes_init_tfm(struct crypto_skcipher *tfm)
+{
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx));
+
+ return 0;
+}
+
/*
* Hashing (SHA1/SHA256)
*/
@@ -889,6 +938,39 @@ static struct skcipher_alg dcp_aes_algs[] = {
.ivsize = AES_BLOCK_SIZE,
.init = mxs_dcp_aes_fallback_init_tfm,
.exit = mxs_dcp_aes_fallback_exit_tfm,
+ }, {
+ .base.cra_name = "ecb(paes)",
+ .base.cra_driver_name = "ecb-paes-dcp",
+ .base.cra_priority = 401,
+ .base.cra_alignmask = 15,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct dcp_async_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = DCP_PAES_KEYSIZE,
+ .max_keysize = DCP_PAES_KEYSIZE,
+ .setkey = mxs_dcp_aes_setrefkey,
+ .encrypt = mxs_dcp_aes_ecb_encrypt,
+ .decrypt = mxs_dcp_aes_ecb_decrypt,
+ .init = mxs_dcp_paes_init_tfm,
+ }, {
+ .base.cra_name = "cbc(paes)",
+ .base.cra_driver_name = "cbc-paes-dcp",
+ .base.cra_priority = 401,
+ .base.cra_alignmask = 15,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct dcp_async_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = DCP_PAES_KEYSIZE,
+ .max_keysize = DCP_PAES_KEYSIZE,
+ .setkey = mxs_dcp_aes_setrefkey,
+ .encrypt = mxs_dcp_aes_cbc_encrypt,
+ .decrypt = mxs_dcp_aes_cbc_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .init = mxs_dcp_paes_init_tfm,
},
};
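
The new "ecb(paes)" and "cbc(paes)" entries accept a one-byte key reference (DCP_PAES_KEYSIZE) naming a hardware key slot rather than raw key material, and they are registered with CRYPTO_ALG_INTERNAL, so an in-kernel caller has to request them with that flag set. A rough usage sketch, assuming the selectors from soc/fsl/dcp.h and the standard wait-for-completion skcipher pattern; the function name and the src/dst/iv plumbing are placeholders:

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <soc/fsl/dcp.h>

/* Rough sketch: one CBC encryption with the DCP OTP key selected by a
 * one-byte referenced key. Buffer setup and error paths are trimmed.
 */
static int dcp_paes_encrypt_example(struct scatterlist *src,
				    struct scatterlist *dst, u8 *iv)
{
	const u8 key_ref[DCP_PAES_KEYSIZE] = { DCP_PAES_KEY_OTP };
	DECLARE_CRYPTO_WAIT(wait);
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	int ret;

	/* The algorithm is flagged CRYPTO_ALG_INTERNAL, so pass that
	 * type/mask pair to be allowed to allocate it. */
	tfm = crypto_alloc_skcipher("cbc(paes)", CRYPTO_ALG_INTERNAL,
				    CRYPTO_ALG_INTERNAL);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_skcipher_setkey(tfm, key_ref, sizeof(key_ref));
	if (ret)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE, iv);

	/* The driver is asynchronous; wait for the request to complete. */
	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}

The CRYPTO_ALG_INTERNAL flag keeps these transforms out of general allocation; the expected consumer is in-kernel code that understands the DCP key-slot semantics.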