author	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-04 20:06:34 +0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-04 20:06:34 +0400
commit	d66e6737d454553e1e62109d8298ede5351178a4 (patch)
tree	c28b205045935b111527f461d2b114daa26e4fb8 /drivers/crypto
parent	612a9aab56a93533e76e3ad91642db7033e03b69 (diff)
parent	c9f97a27ceee84998999bf3341e6d5d207b05539 (diff)
download	linux-d66e6737d454553e1e62109d8298ede5351178a4.tar.xz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:

 - Optimised AES/SHA1 for ARM.
 - IPsec ESN support in talitos and caam.
 - x86_64/avx implementation of cast5/cast6.
 - Add/use multi-algorithm registration helpers where possible.
 - Added IBM Power7+ in-Nest support.
 - Misc fixes.

Fix up trivial conflicts in crypto/Kconfig due to the sparc64 crypto
config options being added next to the new ARM ones.

[ Side note: cut-and-paste duplicate help texts make those conflicts
  harder to read than necessary, thanks to git being smart about
  minimizing conflicts and maximizing the common parts... ]

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (71 commits)
  crypto: x86/glue_helper - fix storing of new IV in CBC encryption
  crypto: cast5/avx - fix storing of new IV in CBC encryption
  crypto: tcrypt - add missing tests for camellia and ghash
  crypto: testmgr - make test_aead also test 'dst != src' code paths
  crypto: testmgr - make test_skcipher also test 'dst != src' code paths
  crypto: testmgr - add test vectors for CTR mode IV increasement
  crypto: testmgr - add test vectors for partial ctr(cast5) and ctr(cast6)
  crypto: testmgr - allow non-multi page and multi page skcipher tests from same test template
  crypto: caam - increase TRNG clocks per sample
  crypto, tcrypt: remove local_bh_disable/enable() around local_irq_disable/enable()
  crypto: tegra-aes - fix error return code
  crypto: crypto4xx - fix error return code
  crypto: hifn_795x - fix error return code
  crypto: ux500 - fix error return code
  crypto: caam - fix error IDs for SEC v5.x RNG4
  hwrng: mxc-rnga - Access data via structure
  hwrng: mxc-rnga - Adapt clocks to new i.mx clock framework
  crypto: caam - add IPsec ESN support
  crypto: 842 - remove .cra_list initialization
  Revert "[CRYPTO] cast6: inline bloat--"
  ...
Diffstat (limited to 'drivers/crypto')
-rw-r--r--	drivers/crypto/Kconfig	22
-rw-r--r--	drivers/crypto/amcc/crypto4xx_core.c	1
-rw-r--r--	drivers/crypto/atmel-aes.c	7
-rw-r--r--	drivers/crypto/atmel-sha.c	5
-rw-r--r--	drivers/crypto/atmel-tdes.c	6
-rw-r--r--	drivers/crypto/caam/caamalg.c	51
-rw-r--r--	drivers/crypto/caam/caamhash.c	22
-rw-r--r--	drivers/crypto/caam/caamrng.c	9
-rw-r--r--	drivers/crypto/caam/compat.h	1
-rw-r--r--	drivers/crypto/caam/ctrl.c	6
-rw-r--r--	drivers/crypto/caam/error.c	2
-rw-r--r--	drivers/crypto/caam/key_gen.c	4
-rw-r--r--	drivers/crypto/geode-aes.c	18
-rw-r--r--	drivers/crypto/hifn_795x.c	5
-rw-r--r--	drivers/crypto/nx/Kconfig	26
-rw-r--r--	drivers/crypto/nx/Makefile	5
-rw-r--r--	drivers/crypto/nx/nx-842.c	1617
-rw-r--r--	drivers/crypto/nx/nx-aes-cbc.c	1
-rw-r--r--	drivers/crypto/nx/nx-aes-ccm.c	2
-rw-r--r--	drivers/crypto/nx/nx-aes-ctr.c	2
-rw-r--r--	drivers/crypto/nx/nx-aes-ecb.c	1
-rw-r--r--	drivers/crypto/nx/nx-aes-gcm.c	2
-rw-r--r--	drivers/crypto/omap-aes.c	1
-rw-r--r--	drivers/crypto/padlock-aes.c	3
-rw-r--r--	drivers/crypto/s5p-sss.c	1
-rw-r--r--	drivers/crypto/talitos.c	442
-rw-r--r--	drivers/crypto/tegra-aes.c	3
-rw-r--r--	drivers/crypto/ux500/cryp/cryp_core.c	1
-rw-r--r--	drivers/crypto/ux500/hash/hash_core.c	1
29 files changed, 1924 insertions(+), 343 deletions(-)
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 7d74d092aa8f..308c7fb92a60 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -298,21 +298,15 @@ config CRYPTO_DEV_TEGRA_AES
will be called tegra-aes.
config CRYPTO_DEV_NX
- tristate "Support for Power7+ in-Nest cryptographic acceleration"
+ bool "Support for IBM Power7+ in-Nest cryptographic acceleration"
depends on PPC64 && IBMVIO
- select CRYPTO_AES
- select CRYPTO_CBC
- select CRYPTO_ECB
- select CRYPTO_CCM
- select CRYPTO_GCM
- select CRYPTO_AUTHENC
- select CRYPTO_XCBC
- select CRYPTO_SHA256
- select CRYPTO_SHA512
+ default n
help
- Support for Power7+ in-Nest cryptographic acceleration. This
- module supports acceleration for AES and SHA2 algorithms. If you
- choose 'M' here, this module will be called nx_crypto.
+ Support for Power7+ in-Nest cryptographic acceleration.
+
+if CRYPTO_DEV_NX
+ source "drivers/crypto/nx/Kconfig"
+endif
config CRYPTO_DEV_UX500
tristate "Driver for ST-Ericsson UX500 crypto hardware acceleration"
@@ -340,7 +334,7 @@ config CRYPTO_DEV_ATMEL_AES
select CRYPTO_AES
select CRYPTO_ALGAPI
select CRYPTO_BLKCIPHER
- select CONFIG_AT_HDMAC
+ select AT_HDMAC
help
Some Atmel processors have AES hw accelerator.
Select this if you want to use the Atmel module for
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 802e85102c32..f88e3d8f6b64 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1226,6 +1226,7 @@ static int __init crypto4xx_probe(struct platform_device *ofdev)
core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0);
if (!core_dev->dev->ce_base) {
dev_err(dev, "failed to of_iomap\n");
+ rc = -ENOMEM;
goto err_iomap;
}
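This is the first of several "fix error return code" patches in this pull: the probe path jumped to its error label with rc still holding 0 from an earlier successful call, so a failed probe reported success. A minimal stand-alone sketch of the bug class (all names hypothetical, not the driver's code):

    #include <errno.h>
    #include <stddef.h>

    static void *map_resource(void) { return NULL; /* pretend the mapping failed */ }

    static int example_probe(void)
    {
        int rc = 0;                 /* success so far */
        void *base = map_resource();

        if (!base) {
            rc = -ENOMEM;           /* the fix: without this assignment, */
            goto err;               /* probe would return 0 on failure   */
        }
        return 0;
    err:
        return rc;
    }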
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 6bb20fffbf49..8061336e07e7 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -24,15 +24,10 @@
#include <linux/platform_device.h>
#include <linux/device.h>
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/clk.h>
#include <linux/irq.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
@@ -1017,7 +1012,6 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
int err, i, j;
for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
- INIT_LIST_HEAD(&aes_algs[i].cra_list);
err = crypto_register_alg(&aes_algs[i]);
if (err)
goto err_aes_algs;
@@ -1026,7 +1020,6 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
atmel_aes_hw_version_init(dd);
if (dd->hw_version >= 0x130) {
- INIT_LIST_HEAD(&aes_cfb64_alg[0].cra_list);
err = crypto_register_alg(&aes_cfb64_alg[0]);
if (err)
goto err_aes_cfb64_alg;
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index f938b9d79b66..bcdf55fdc623 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -24,15 +24,10 @@
#include <linux/platform_device.h>
#include <linux/device.h>
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/clk.h>
#include <linux/irq.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index eb2b61e57e2d..7495f98c7221 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -24,15 +24,10 @@
#include <linux/platform_device.h>
#include <linux/device.h>
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/clk.h>
#include <linux/irq.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
@@ -1044,7 +1039,6 @@ static int atmel_tdes_register_algs(struct atmel_tdes_dev *dd)
int err, i, j;
for (i = 0; i < ARRAY_SIZE(tdes_algs); i++) {
- INIT_LIST_HEAD(&tdes_algs[i].cra_list);
err = crypto_register_alg(&tdes_algs[i]);
if (err)
goto err_tdes_algs;
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 0c1ea8492eff..b2a0a0726a54 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -205,7 +205,7 @@ static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
{
u32 *key_jump_cmd;
- init_sh_desc(desc, HDR_SHARE_WAIT);
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
/* Skip if already shared */
key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
@@ -224,7 +224,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
struct aead_tfm *tfm = &aead->base.crt_aead;
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- bool keys_fit_inline = 0;
+ bool keys_fit_inline = false;
u32 *key_jump_cmd, *jump_cmd;
u32 geniv, moveiv;
u32 *desc;
@@ -239,7 +239,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
ctx->split_key_pad_len + ctx->enckeylen <=
CAAM_DESC_BYTES_MAX)
- keys_fit_inline = 1;
+ keys_fit_inline = true;
/* aead_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
@@ -297,12 +297,12 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
ctx->split_key_pad_len + ctx->enckeylen <=
CAAM_DESC_BYTES_MAX)
- keys_fit_inline = 1;
+ keys_fit_inline = true;
desc = ctx->sh_desc_dec;
/* aead_decrypt shared descriptor */
- init_sh_desc(desc, HDR_SHARE_WAIT);
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
/* Skip if already shared */
key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
@@ -365,7 +365,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
ctx->split_key_pad_len + ctx->enckeylen <=
CAAM_DESC_BYTES_MAX)
- keys_fit_inline = 1;
+ keys_fit_inline = true;
/* aead_givencrypt shared descriptor */
desc = ctx->sh_desc_givenc;
@@ -564,7 +564,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
/* ablkcipher_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
- init_sh_desc(desc, HDR_SHARE_WAIT);
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
/* Skip if already shared */
key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
JUMP_COND_SHRD);
@@ -605,7 +605,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
/* ablkcipher_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
- init_sh_desc(desc, HDR_SHARE_WAIT);
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
/* Skip if already shared */
key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
JUMP_COND_SHRD);
@@ -1354,10 +1354,10 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
contig &= ~GIV_SRC_CONTIG;
if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
contig &= ~GIV_DST_CONTIG;
- if (unlikely(req->src != req->dst)) {
- dst_nents = dst_nents ? : 1;
- sec4_sg_len += 1;
- }
+ if (unlikely(req->src != req->dst)) {
+ dst_nents = dst_nents ? : 1;
+ sec4_sg_len += 1;
+ }
if (!(contig & GIV_SRC_CONTIG)) {
assoc_nents = assoc_nents ? : 1;
src_nents = src_nents ? : 1;
@@ -1650,7 +1650,11 @@ struct caam_alg_template {
};
static struct caam_alg_template driver_algs[] = {
- /* single-pass ipsec_esp descriptor */
+ /*
+ * single-pass ipsec_esp descriptor
+ * authencesn(*,*) is also registered, although not present
+ * explicitly here.
+ */
{
.name = "authenc(hmac(md5),cbc(aes))",
.driver_name = "authenc-hmac-md5-cbc-aes-caam",
@@ -2213,7 +2217,9 @@ static int __init caam_algapi_init(void)
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
/* TODO: check if h/w supports alg */
struct caam_crypto_alg *t_alg;
+ bool done = false;
+authencesn:
t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]);
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);
@@ -2227,8 +2233,25 @@ static int __init caam_algapi_init(void)
dev_warn(ctrldev, "%s alg registration failed\n",
t_alg->crypto_alg.cra_driver_name);
kfree(t_alg);
- } else
+ } else {
list_add_tail(&t_alg->entry, &priv->alg_list);
+ if (driver_algs[i].type == CRYPTO_ALG_TYPE_AEAD &&
+ !memcmp(driver_algs[i].name, "authenc", 7) &&
+ !done) {
+ char *name;
+
+ name = driver_algs[i].name;
+ memmove(name + 10, name + 7, strlen(name) - 7);
+ memcpy(name + 7, "esn", 3);
+
+ name = driver_algs[i].driver_name;
+ memmove(name + 10, name + 7, strlen(name) - 7);
+ memcpy(name + 7, "esn", 3);
+
+ done = true;
+ goto authencesn;
+ }
+ }
}
if (!list_empty(&priv->alg_list))
dev_info(ctrldev, "%s algorithms registered in /proc/crypto\n",
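The registration loop above rewrites each "authenc(...)" template name to "authencesn(...)" in place and registers it a second time. A stand-alone sketch of that string transform (hypothetical helper; assumes at least three bytes of headroom in the buffer, which holds for the kernel's CRYPTO_MAX_ALG_NAME-sized name arrays):

    #include <stdio.h>
    #include <string.h>

    /* "authenc(hmac(md5),cbc(aes))" -> "authencesn(hmac(md5),cbc(aes))" */
    static void authenc_to_authencesn(char *name)
    {
        /* shift the tail out by 3 bytes; the +1 moves the '\0' as well */
        memmove(name + 10, name + 7, strlen(name) - 7 + 1);
        memcpy(name + 7, "esn", 3);
    }

    int main(void)
    {
        char name[64] = "authenc(hmac(md5),cbc(aes))";
        authenc_to_authencesn(name);
        printf("%s\n", name);   /* authencesn(hmac(md5),cbc(aes)) */
        return 0;
    }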
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 895aaf2bca92..32aba7a61503 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -225,7 +225,7 @@ static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
u32 *key_jump_cmd;
- init_sh_desc(desc, HDR_SHARE_WAIT);
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
if (ctx->split_key_len) {
/* Skip if already shared */
@@ -311,7 +311,7 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_update shared descriptor */
desc = ctx->sh_desc_update;
- init_sh_desc(desc, HDR_SHARE_WAIT);
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
/* Import context from software */
append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
@@ -430,6 +430,10 @@ static u32 hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
int ret = 0;
desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
+ if (!desc) {
+ dev_err(jrdev, "unable to allocate key input memory\n");
+ return -ENOMEM;
+ }
init_job_desc(desc, 0);
@@ -1736,8 +1740,11 @@ static void __exit caam_algapi_hash_exit(void)
struct caam_hash_alg *t_alg, *n;
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
- if (!dev_node)
- return;
+ if (!dev_node) {
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+ if (!dev_node)
+ return;
+ }
pdev = of_find_device_by_node(dev_node);
if (!pdev)
@@ -1812,8 +1819,11 @@ static int __init caam_algapi_hash_init(void)
int i = 0, err = 0;
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
- if (!dev_node)
- return -ENODEV;
+ if (!dev_node) {
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+ if (!dev_node)
+ return -ENODEV;
+ }
pdev = of_find_device_by_node(dev_node);
if (!pdev)
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index e2bfe161dece..d1939a9539c0 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -193,7 +193,7 @@ static inline void rng_create_sh_desc(struct caam_rng_ctx *ctx)
struct device *jrdev = ctx->jrdev;
u32 *desc = ctx->sh_desc;
- init_sh_desc(desc, HDR_SHARE_WAIT);
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
/* Propagate errors from shared to job descriptor */
append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
@@ -284,8 +284,11 @@ static int __init caam_rng_init(void)
struct caam_drv_private *priv;
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
- if (!dev_node)
- return -ENODEV;
+ if (!dev_node) {
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+ if (!dev_node)
+ return -ENODEV;
+ }
pdev = of_find_device_by_node(dev_node);
if (!pdev)
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index 762aeff626ac..cf15e7813801 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -23,6 +23,7 @@
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/circ_buf.h>
+#include <linux/string.h>
#include <net/xfrm.h>
#include <crypto/algapi.h>
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 414ba20c05a1..bf20dd891705 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -129,7 +129,7 @@ static int instantiate_rng(struct device *jrdev)
/*
* By default, the TRNG runs for 200 clocks per sample;
- * 800 clocks per sample generates better entropy.
+ * 1600 clocks per sample generates better entropy.
*/
static void kick_trng(struct platform_device *pdev)
{
@@ -144,9 +144,9 @@ static void kick_trng(struct platform_device *pdev)
/* put RNG4 into program mode */
setbits32(&r4tst->rtmctl, RTMCTL_PRGM);
- /* 800 clocks per sample */
+ /* 1600 clocks per sample */
val = rd_reg32(&r4tst->rtsdctl);
- val = (val & ~RTSDCTL_ENT_DLY_MASK) | (800 << RTSDCTL_ENT_DLY_SHIFT);
+ val = (val & ~RTSDCTL_ENT_DLY_MASK) | (1600 << RTSDCTL_ENT_DLY_SHIFT);
wr_reg32(&r4tst->rtsdctl, val);
/* min. freq. count */
wr_reg32(&r4tst->rtfrqmin, 400);
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 9955ed9643e6..30b8f74833d4 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -77,10 +77,8 @@ static void report_ccb_status(u32 status, char *outstr)
"Not instantiated",
"Test instantiate",
"Prediction resistance",
- "",
"Prediction resistance and test request",
"Uninstantiate",
- "",
"Secure key generation",
};
u8 cha_id = (status & JRSTA_CCBERR_CHAID_MASK) >>
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index d216cd3cc569..f6dba10246c3 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -54,6 +54,10 @@ u32 gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
int ret = 0;
desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
+ if (!desc) {
+ dev_err(jrdev, "unable to allocate key input memory\n");
+ return -ENOMEM;
+ }
init_job_desc(desc, 0);
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index f3e36c86b6c3..51f196d77f21 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -289,7 +289,6 @@ static struct crypto_alg geode_alg = {
.cra_blocksize = AES_MIN_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct geode_aes_op),
.cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(geode_alg.cra_list),
.cra_u = {
.cipher = {
.cia_min_keysize = AES_MIN_KEY_SIZE,
@@ -402,7 +401,6 @@ static struct crypto_alg geode_cbc_alg = {
.cra_alignmask = 15,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(geode_cbc_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
@@ -489,7 +487,6 @@ static struct crypto_alg geode_ecb_alg = {
.cra_alignmask = 15,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(geode_ecb_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
@@ -588,21 +585,8 @@ static struct pci_driver geode_aes_driver = {
.remove = __devexit_p(geode_aes_remove)
};
-static int __init
-geode_aes_init(void)
-{
- return pci_register_driver(&geode_aes_driver);
-}
-
-static void __exit
-geode_aes_exit(void)
-{
- pci_unregister_driver(&geode_aes_driver);
-}
+module_pci_driver(geode_aes_driver);
MODULE_AUTHOR("Advanced Micro Devices, Inc.");
MODULE_DESCRIPTION("Geode LX Hardware AES driver");
MODULE_LICENSE("GPL");
-
-module_init(geode_aes_init);
-module_exit(geode_aes_exit);
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index df14358d7fa1..fda32968a66b 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -2611,14 +2611,17 @@ static int __devinit hifn_probe(struct pci_dev *pdev, const struct pci_device_id
size = pci_resource_len(pdev, i);
dev->bar[i] = ioremap_nocache(addr, size);
- if (!dev->bar[i])
+ if (!dev->bar[i]) {
+ err = -ENOMEM;
goto err_out_unmap_bars;
+ }
}
dev->desc_virt = pci_alloc_consistent(pdev, sizeof(struct hifn_dma),
&dev->desc_dma);
if (!dev->desc_virt) {
dprintk("Failed to allocate descriptor rings.\n");
+ err = -ENOMEM;
goto err_out_unmap_bars;
}
memset(dev->desc_virt, 0, sizeof(struct hifn_dma));
diff --git a/drivers/crypto/nx/Kconfig b/drivers/crypto/nx/Kconfig
new file mode 100644
index 000000000000..f82616621ae1
--- /dev/null
+++ b/drivers/crypto/nx/Kconfig
@@ -0,0 +1,26 @@
+config CRYPTO_DEV_NX_ENCRYPT
+ tristate "Encryption acceleration support"
+ depends on PPC64 && IBMVIO
+ default y
+ select CRYPTO_AES
+ select CRYPTO_CBC
+ select CRYPTO_ECB
+ select CRYPTO_CCM
+ select CRYPTO_GCM
+ select CRYPTO_AUTHENC
+ select CRYPTO_XCBC
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
+ help
+ Support for Power7+ in-Nest encryption acceleration. This
+ module supports acceleration for AES and SHA2 algorithms. If you
+ choose 'M' here, this module will be called nx_crypto.
+
+config CRYPTO_DEV_NX_COMPRESS
+ tristate "Compression acceleration support"
+ depends on PPC64 && IBMVIO
+ default y
+ help
+ Support for Power7+ in-Nest compression acceleration. This
+ module supports acceleration for the 842 compression algorithm.
+ If you choose 'M' here, this module will be called nx_compress.
diff --git a/drivers/crypto/nx/Makefile b/drivers/crypto/nx/Makefile
index 411ce59c80d1..bb770ea45ce9 100644
--- a/drivers/crypto/nx/Makefile
+++ b/drivers/crypto/nx/Makefile
@@ -1,4 +1,4 @@
-obj-$(CONFIG_CRYPTO_DEV_NX) += nx-crypto.o
+obj-$(CONFIG_CRYPTO_DEV_NX_ENCRYPT) += nx-crypto.o
nx-crypto-objs := nx.o \
nx_debugfs.o \
nx-aes-cbc.o \
@@ -9,3 +9,6 @@ nx-crypto-objs := nx.o \
nx-aes-xcbc.o \
nx-sha256.o \
nx-sha512.o
+
+obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS) += nx-compress.o
+nx-compress-objs := nx-842.o
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c
new file mode 100644
index 000000000000..0ce625738677
--- /dev/null
+++ b/drivers/crypto/nx/nx-842.c
@@ -0,0 +1,1617 @@
+/*
+ * Driver for IBM Power 842 compression accelerator
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corporation, 2012
+ *
+ * Authors: Robert Jennings <rcj@linux.vnet.ibm.com>
+ * Seth Jennings <sjenning@linux.vnet.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/nx842.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+#include <asm/page.h>
+#include <asm/pSeries_reconfig.h>
+#include <asm/vio.h>
+
+#include "nx_csbcpb.h" /* struct nx_csbcpb */
+
+#define MODULE_NAME "nx-compress"
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Robert Jennings <rcj@linux.vnet.ibm.com>");
+MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors");
+
+#define SHIFT_4K 12
+#define SHIFT_64K 16
+#define SIZE_4K (1UL << SHIFT_4K)
+#define SIZE_64K (1UL << SHIFT_64K)
+
+/* IO buffer must be 128 byte aligned */
+#define IO_BUFFER_ALIGN 128
+
+struct nx842_header {
+ int blocks_nr; /* number of compressed blocks */
+ int offset; /* offset of the first block (from beginning of header) */
+ int sizes[0]; /* size of compressed blocks */
+};
+
+static inline int nx842_header_size(const struct nx842_header *hdr)
+{
+ return sizeof(struct nx842_header) +
+ hdr->blocks_nr * sizeof(hdr->sizes[0]);
+}
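A worked example of the header-size arithmetic as a stand-alone sketch (sizes[] is the C99 spelling of the kernel's sizes[0] flexible-array idiom):

    #include <stdio.h>

    struct nx842_header {
        int blocks_nr;      /* number of compressed blocks */
        int offset;         /* offset of the first block */
        int sizes[];        /* size of each compressed block */
    };

    int main(void)
    {
        /* a 64K page compressed as 16 blocks of 4K max_sync_size */
        int blocks_nr = 16;
        size_t hdrsize = sizeof(struct nx842_header) + blocks_nr * sizeof(int);
        printf("header: %zu bytes\n", hdrsize);   /* 8 + 64 = 72 */
        return 0;
    }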
+
+/* Macros for fields within nx_csbcpb */
+/* Check the valid bit within the csbcpb valid field */
+#define NX842_CSBCBP_VALID_CHK(x) (x & BIT_MASK(7))
+
+/* CE macros operate on the completion_extension field bits in the csbcpb.
+ * CE0 0=full completion, 1=partial completion
+ * CE1 0=CE0 indicates completion, 1=termination (output may be modified)
+ * CE2 0=processed_bytes is source bytes, 1=processed_bytes is target bytes */
+#define NX842_CSBCPB_CE0(x) (x & BIT_MASK(7))
+#define NX842_CSBCPB_CE1(x) (x & BIT_MASK(6))
+#define NX842_CSBCPB_CE2(x) (x & BIT_MASK(5))
+
+/* The NX unit accepts data only on 4K page boundaries */
+#define NX842_HW_PAGE_SHIFT SHIFT_4K
+#define NX842_HW_PAGE_SIZE (ASM_CONST(1) << NX842_HW_PAGE_SHIFT)
+#define NX842_HW_PAGE_MASK (~(NX842_HW_PAGE_SIZE-1))
+
+enum nx842_status {
+ UNAVAILABLE,
+ AVAILABLE
+};
+
+struct ibm_nx842_counters {
+ atomic64_t comp_complete;
+ atomic64_t comp_failed;
+ atomic64_t decomp_complete;
+ atomic64_t decomp_failed;
+ atomic64_t swdecomp;
+ atomic64_t comp_times[32];
+ atomic64_t decomp_times[32];
+};
+
+static struct nx842_devdata {
+ struct vio_dev *vdev;
+ struct device *dev;
+ struct ibm_nx842_counters *counters;
+ unsigned int max_sg_len;
+ unsigned int max_sync_size;
+ unsigned int max_sync_sg;
+ enum nx842_status status;
+} __rcu *devdata;
+static DEFINE_SPINLOCK(devdata_mutex);
+
+#define NX842_COUNTER_INC(_x) \
+static inline void nx842_inc_##_x( \
+ const struct nx842_devdata *dev) { \
+ if (dev) \
+ atomic64_inc(&dev->counters->_x); \
+}
+NX842_COUNTER_INC(comp_complete);
+NX842_COUNTER_INC(comp_failed);
+NX842_COUNTER_INC(decomp_complete);
+NX842_COUNTER_INC(decomp_failed);
+NX842_COUNTER_INC(swdecomp);
+
+#define NX842_HIST_SLOTS 16
+
+static void ibm_nx842_incr_hist(atomic64_t *times, unsigned int time)
+{
+ int bucket = fls(time);
+
+ if (bucket)
+ bucket = min((NX842_HIST_SLOTS - 1), bucket - 1);
+
+ atomic64_inc(&times[bucket]);
+}
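A sample therefore lands in the bucket of its highest set bit, clamped to the last slot: a 5 us operation has fls(5) == 3 and lands in bucket 2, which the sysfs code further down reports as the 4-7us range. A stand-alone sketch of the bucketing (fls() replaced with a portable loop, an assumption on my part):

    #include <stdio.h>

    static int fls_portable(unsigned int x)   /* 1-based index of highest set bit */
    {
        int i = 0;
        while (x) { i++; x >>= 1; }
        return i;
    }

    int main(void)
    {
        unsigned int samples[] = { 0, 1, 5, 100, 1u << 31 };
        for (int i = 0; i < 5; i++) {
            int bucket = fls_portable(samples[i]);
            if (bucket)
                bucket = (bucket - 1 > 15) ? 15 : bucket - 1;
            printf("%uus -> bucket %d\n", samples[i], bucket);
        }
        return 0;
    }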
+
+/* NX unit operation flags */
+#define NX842_OP_COMPRESS 0x0
+#define NX842_OP_CRC 0x1
+#define NX842_OP_DECOMPRESS 0x2
+#define NX842_OP_COMPRESS_CRC (NX842_OP_COMPRESS | NX842_OP_CRC)
+#define NX842_OP_DECOMPRESS_CRC (NX842_OP_DECOMPRESS | NX842_OP_CRC)
+#define NX842_OP_ASYNC (1<<23)
+#define NX842_OP_NOTIFY (1<<22)
+#define NX842_OP_NOTIFY_INT(x) ((x & 0xff)<<8)
+
+static unsigned long nx842_get_desired_dma(struct vio_dev *viodev)
+{
+ /* No use of DMA mappings within the driver. */
+ return 0;
+}
+
+struct nx842_slentry {
+ unsigned long ptr; /* Real address (use __pa()) */
+ unsigned long len;
+};
+
+/* pHyp scatterlist entry */
+struct nx842_scatterlist {
+ int entry_nr; /* number of slentries */
+ struct nx842_slentry *entries; /* ptr to array of slentries */
+};
+
+/* Does not include sizeof(entry_nr) in the size */
+static inline unsigned long nx842_get_scatterlist_size(
+ struct nx842_scatterlist *sl)
+{
+ return sl->entry_nr * sizeof(struct nx842_slentry);
+}
+
+static int nx842_build_scatterlist(unsigned long buf, int len,
+ struct nx842_scatterlist *sl)
+{
+ unsigned long nextpage;
+ struct nx842_slentry *entry;
+
+ sl->entry_nr = 0;
+
+ entry = sl->entries;
+ while (len) {
+ entry->ptr = __pa(buf);
+ nextpage = ALIGN(buf + 1, NX842_HW_PAGE_SIZE);
+ if (nextpage < buf + len) {
+ /* we aren't at the end yet */
+ if (IS_ALIGNED(buf, NX842_HW_PAGE_SIZE))
+ /* we are in the middle (or beginning) */
+ entry->len = NX842_HW_PAGE_SIZE;
+ else
+ /* we are at the beginning */
+ entry->len = nextpage - buf;
+ } else {
+ /* at the end */
+ entry->len = len;
+ }
+
+ len -= entry->len;
+ buf += entry->len;
+ sl->entry_nr++;
+ entry++;
+ }
+
+ return 0;
+}
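The chunking rule: each entry runs to the next 4K hardware-page boundary, except the last, which takes whatever length remains. A stand-alone sketch that prints the entries for a buffer starting 512 bytes into a page (3584 + 4096 + 2320 = 10000):

    #include <stdio.h>

    #define HW_PAGE  4096UL
    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        unsigned long buf = 512, len = 10000, next, chunk;

        while (len) {
            next = ALIGN_UP(buf + 1, HW_PAGE);
            if (next < buf + len)
                chunk = next - buf;   /* run to the page boundary */
            else
                chunk = len;          /* final, possibly short, entry */
            printf("entry: addr=%lu len=%lu\n", buf, chunk);
            buf += chunk;
            len -= chunk;
        }
        return 0;
    }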
+
+/*
+ * Working memory for software decompression
+ */
+struct sw842_fifo {
+ union {
+ char f8[256][8];
+ char f4[512][4];
+ };
+ char f2[256][2];
+ unsigned char f84_full;
+ unsigned char f2_full;
+ unsigned char f8_count;
+ unsigned char f2_count;
+ unsigned int f4_count;
+};
+
+/*
+ * Working memory for crypto API
+ */
+struct nx842_workmem {
+ char bounce[PAGE_SIZE]; /* bounce buffer for decompression input */
+ union {
+ /* hardware working memory */
+ struct {
+ /* scatterlist */
+ char slin[SIZE_4K];
+ char slout[SIZE_4K];
+ /* coprocessor status/parameter block */
+ struct nx_csbcpb csbcpb;
+ };
+ /* software working memory */
+ struct sw842_fifo swfifo; /* software decompression fifo */
+ };
+};
+
+int nx842_get_workmem_size(void)
+{
+ return sizeof(struct nx842_workmem) + NX842_HW_PAGE_SIZE;
+}
+EXPORT_SYMBOL_GPL(nx842_get_workmem_size);
+
+int nx842_get_workmem_size_aligned(void)
+{
+ return sizeof(struct nx842_workmem);
+}
+EXPORT_SYMBOL_GPL(nx842_get_workmem_size_aligned);
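Callers hand in raw memory of nx842_get_workmem_size() bytes; the extra hardware page covers the worst-case ALIGN() adjustment the compress/decompress paths apply before use. A hedged usage sketch built on the exports above (kernel-side fragment; compress_one_page is a hypothetical caller, error handling trimmed):

    static int compress_one_page(const unsigned char *page_in,
                                 unsigned char *out, unsigned int *outlen)
    {
        void *wmem = kmalloc(nx842_get_workmem_size(), GFP_KERNEL);
        int ret;

        if (!wmem)
            return -ENOMEM;
        /* page_in must be page aligned and exactly PAGE_SIZE long */
        ret = nx842_compress(page_in, PAGE_SIZE, out, outlen, wmem);
        kfree(wmem);
        return ret;
    }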
+
+static int nx842_validate_result(struct device *dev,
+ struct cop_status_block *csb)
+{
+ /* The csb must be valid after returning from vio_h_cop_sync */
+ if (!NX842_CSBCBP_VALID_CHK(csb->valid)) {
+ dev_err(dev, "%s: cspcbp not valid upon completion.\n",
+ __func__);
+ dev_dbg(dev, "valid:0x%02x cs:0x%02x cc:0x%02x ce:0x%02x\n",
+ csb->valid,
+ csb->crb_seq_number,
+ csb->completion_code,
+ csb->completion_extension);
+ dev_dbg(dev, "processed_bytes:%d address:0x%016lx\n",
+ csb->processed_byte_count,
+ (unsigned long)csb->address);
+ return -EIO;
+ }
+
+ /* Check return values from the hardware in the CSB */
+ switch (csb->completion_code) {
+ case 0: /* Completed without error */
+ break;
+ case 64: /* Target bytes > Source bytes during compression */
+ case 13: /* Output buffer too small */
+ dev_dbg(dev, "%s: Compression output larger than input\n",
+ __func__);
+ return -ENOSPC;
+ case 66: /* Input data contains an illegal template field */
+ case 67: /* Template indicates data past the end of the input stream */
+ dev_dbg(dev, "%s: Bad data for decompression (code:%d)\n",
+ __func__, csb->completion_code);
+ return -EINVAL;
+ default:
+ dev_dbg(dev, "%s: Unspecified error (code:%d)\n",
+ __func__, csb->completion_code);
+ return -EIO;
+ }
+
+ /* Hardware sanity check */
+ if (!NX842_CSBCPB_CE2(csb->completion_extension)) {
+ dev_err(dev, "%s: No error returned by hardware, but "
+ "data returned is unusable, contact support.\n"
+ "(Additional info: csbcbp->processed bytes "
+ "does not specify processed bytes for the "
+ "target buffer.)\n", __func__);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * nx842_compress - Compress data using the 842 algorithm
+ *
+ * Compression provided by the NX842 coprocessor on IBM Power systems.
+ * The input buffer is compressed and the result is stored in the
+ * provided output buffer.
+ *
+ * Upon return from this function @outlen contains the length of the
+ * compressed data. If there is an error then @outlen will be 0 and an
+ * error will be specified by the return code from this function.
+ *
+ * @in: Pointer to input buffer, must be page aligned
+ * @inlen: Length of input buffer, must be PAGE_SIZE
+ * @out: Pointer to output buffer
+ * @outlen: Length of output buffer
+ * @wrkmem: ptr to buffer for working memory, size determined by
+ * nx842_get_workmem_size()
+ *
+ * Returns:
+ * 0 Success, output of length @outlen stored in the buffer at @out
+ * -ENOMEM Unable to allocate internal buffers
+ * -ENOSPC Output buffer is too small
+ * -EMSGSIZE XXX Difficult to describe this limitation
+ * -EIO Internal error
+ * -ENODEV Hardware unavailable
+ */
+int nx842_compress(const unsigned char *in, unsigned int inlen,
+ unsigned char *out, unsigned int *outlen, void *wmem)
+{
+ struct nx842_header *hdr;
+ struct nx842_devdata *local_devdata;
+ struct device *dev = NULL;
+ struct nx842_workmem *workmem;
+ struct nx842_scatterlist slin, slout;
+ struct nx_csbcpb *csbcpb;
+ int ret = 0, max_sync_size, i, bytesleft, size, hdrsize;
+ unsigned long inbuf, outbuf, padding;
+ struct vio_pfo_op op = {
+ .done = NULL,
+ .handle = 0,
+ .timeout = 0,
+ };
+ unsigned long start_time = get_tb();
+
+ /*
+ * Make sure input buffer is 64k page aligned. This is assumed since
+ * this driver is designed for page compression only (for now). This
+ * is very nice since we can now use direct DDE(s) for the input and
+ * the alignment is guaranteed.
+ */
+ inbuf = (unsigned long)in;
+ if (!IS_ALIGNED(inbuf, PAGE_SIZE) || inlen != PAGE_SIZE)
+ return -EINVAL;
+
+ rcu_read_lock();
+ local_devdata = rcu_dereference(devdata);
+ if (!local_devdata || !local_devdata->dev) {
+ rcu_read_unlock();
+ return -ENODEV;
+ }
+ max_sync_size = local_devdata->max_sync_size;
+ dev = local_devdata->dev;
+
+ /* Create the header */
+ hdr = (struct nx842_header *)out;
+ hdr->blocks_nr = PAGE_SIZE / max_sync_size;
+ hdrsize = nx842_header_size(hdr);
+ outbuf = (unsigned long)out + hdrsize;
+ bytesleft = *outlen - hdrsize;
+
+ /* Init scatterlist */
+ workmem = (struct nx842_workmem *)ALIGN((unsigned long)wmem,
+ NX842_HW_PAGE_SIZE);
+ slin.entries = (struct nx842_slentry *)workmem->slin;
+ slout.entries = (struct nx842_slentry *)workmem->slout;
+
+ /* Init operation */
+ op.flags = NX842_OP_COMPRESS;
+ csbcpb = &workmem->csbcpb;
+ memset(csbcpb, 0, sizeof(*csbcpb));
+ op.csbcpb = __pa(csbcpb);
+ op.out = __pa(slout.entries);
+
+ for (i = 0; i < hdr->blocks_nr; i++) {
+ /*
+ * Aligning the output blocks to 128 bytes does waste space,
+ * but it prevents the need for bounce buffers and memory
+ * copies. It also simplifies the code a lot. In the worst
+ * case (64k page, 4k max_sync_size), you lose up to
+ * (128*16)/64k = ~3% the compression factor. For 64k
+ * max_sync_size, the loss would be at most 128/64k = ~0.2%.
+ */
+ padding = ALIGN(outbuf, IO_BUFFER_ALIGN) - outbuf;
+ outbuf += padding;
+ bytesleft -= padding;
+ if (i == 0)
+ /* save offset into first block in header */
+ hdr->offset = padding + hdrsize;
+
+ if (bytesleft <= 0) {
+ ret = -ENOSPC;
+ goto unlock;
+ }
+
+ /*
+ * NOTE: If the default max_sync_size is changed from 4k
+ * to 64k, remove the "likely" case below, since a
+ * scatterlist will always be needed.
+ */
+ if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) {
+ /* Create direct DDE */
+ op.in = __pa(inbuf);
+ op.inlen = max_sync_size;
+
+ } else {
+ /* Create indirect DDE (scatterlist) */
+ nx842_build_scatterlist(inbuf, max_sync_size, &slin);
+ op.in = __pa(slin.entries);
+ op.inlen = -nx842_get_scatterlist_size(&slin);
+ }
+
+ /*
+ * If max_sync_size != NX842_HW_PAGE_SIZE, an indirect
+ * DDE is required for the outbuf.
+ * If max_sync_size == NX842_HW_PAGE_SIZE, outbuf must
+ * also be page aligned (1 in 128/4k=32 chance) in order
+ * to use a direct DDE.
+ * This is unlikely, just use an indirect DDE always.
+ */
+ nx842_build_scatterlist(outbuf,
+ min(bytesleft, max_sync_size), &slout);
+ /* op.out set before loop */
+ op.outlen = -nx842_get_scatterlist_size(&slout);
+
+ /* Send request to pHyp */
+ ret = vio_h_cop_sync(local_devdata->vdev, &op);
+
+ /* Check for pHyp error */
+ if (ret) {
+ dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n",
+ __func__, ret, op.hcall_err);
+ ret = -EIO;
+ goto unlock;
+ }
+
+ /* Check for hardware error */
+ ret = nx842_validate_result(dev, &csbcpb->csb);
+ if (ret && ret != -ENOSPC)
+ goto unlock;
+
+ /* Handle incompressible data */
+ if (unlikely(ret == -ENOSPC)) {
+ if (bytesleft < max_sync_size) {
+ /*
+ * Not enough space left in the output buffer
+ * to store uncompressed block
+ */
+ goto unlock;
+ } else {
+ /* Store incompressible block */
+ memcpy((void *)outbuf, (void *)inbuf,
+ max_sync_size);
+ hdr->sizes[i] = -max_sync_size;
+ outbuf += max_sync_size;
+ bytesleft -= max_sync_size;
+ /* Reset ret, incompressible data handled */
+ ret = 0;
+ }
+ } else {
+ /* Normal case, compression was successful */
+ size = csbcpb->csb.processed_byte_count;
+ dev_dbg(dev, "%s: processed_bytes=%d\n",
+ __func__, size);
+ hdr->sizes[i] = size;
+ outbuf += size;
+ bytesleft -= size;
+ }
+
+ inbuf += max_sync_size;
+ }
+
+ *outlen = (unsigned int)(outbuf - (unsigned long)out);
+
+unlock:
+ if (ret)
+ nx842_inc_comp_failed(local_devdata);
+ else {
+ nx842_inc_comp_complete(local_devdata);
+ ibm_nx842_incr_hist(local_devdata->counters->comp_times,
+ (get_tb() - start_time) / tb_ticks_per_usec);
+ }
+ rcu_read_unlock();
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nx842_compress);
+
+static int sw842_decompress(const unsigned char *, int, unsigned char *, int *,
+ const void *);
+
+/**
+ * nx842_decompress - Decompress data using the 842 algorithm
+ *
+ * Decompression provided by the NX842 coprocessor on IBM Power systems.
+ * The input buffer is decompressed and the result is stored in the
+ * provided output buffer. The size allocated to the output buffer is
+ * provided by the caller of this function in @outlen. Upon return from
+ * this function @outlen contains the length of the decompressed data.
+ * If there is an error then @outlen will be 0 and an error will be
+ * specified by the return code from this function.
+ *
+ * @in: Pointer to input buffer, will use bounce buffer if not 128 byte
+ * aligned
+ * @inlen: Length of input buffer
+ * @out: Pointer to output buffer, must be page aligned
+ * @outlen: Length of output buffer, must be PAGE_SIZE
+ * @wrkmem: ptr to buffer for working memory, size determined by
+ * nx842_get_workmem_size()
+ *
+ * Returns:
+ * 0 Success, output of length @outlen stored in the buffer at @out
+ * -ENODEV Hardware decompression device is unavailable
+ * -ENOMEM Unable to allocate internal buffers
+ * -ENOSPC Output buffer is too small
+ * -EINVAL Bad input data encountered when attempting decompress
+ * -EIO Internal error
+ */
+int nx842_decompress(const unsigned char *in, unsigned int inlen,
+ unsigned char *out, unsigned int *outlen, void *wmem)
+{
+ struct nx842_header *hdr;
+ struct nx842_devdata *local_devdata;
+ struct device *dev = NULL;
+ struct nx842_workmem *workmem;
+ struct nx842_scatterlist slin, slout;
+ struct nx_csbcpb *csbcpb;
+ int ret = 0, i, size, max_sync_size;
+ unsigned long inbuf, outbuf;
+ struct vio_pfo_op op = {
+ .done = NULL,
+ .handle = 0,
+ .timeout = 0,
+ };
+ unsigned long start_time = get_tb();
+
+ /* Ensure page alignment and size */
+ outbuf = (unsigned long)out;
+ if (!IS_ALIGNED(outbuf, PAGE_SIZE) || *outlen != PAGE_SIZE)
+ return -EINVAL;
+
+ rcu_read_lock();
+ local_devdata = rcu_dereference(devdata);
+ if (local_devdata)
+ dev = local_devdata->dev;
+
+ /* Get header */
+ hdr = (struct nx842_header *)in;
+
+ workmem = (struct nx842_workmem *)ALIGN((unsigned long)wmem,
+ NX842_HW_PAGE_SIZE);
+
+ inbuf = (unsigned long)in + hdr->offset;
+ if (likely(!IS_ALIGNED(inbuf, IO_BUFFER_ALIGN))) {
+ /* Copy block(s) into bounce buffer for alignment */
+ memcpy(workmem->bounce, in + hdr->offset, inlen - hdr->offset);
+ inbuf = (unsigned long)workmem->bounce;
+ }
+
+ /* Init scatterlist */
+ slin.entries = (struct nx842_slentry *)workmem->slin;
+ slout.entries = (struct nx842_slentry *)workmem->slout;
+
+ /* Init operation */
+ op.flags = NX842_OP_DECOMPRESS;
+ csbcpb = &workmem->csbcpb;
+ memset(csbcpb, 0, sizeof(*csbcpb));
+ op.csbcpb = __pa(csbcpb);
+
+ /*
+ * max_sync_size may have changed since compression,
+ * so we can't read it from the device info. We need
+ * to derive it from hdr->blocks_nr.
+ */
+ max_sync_size = PAGE_SIZE / hdr->blocks_nr;
+
+ for (i = 0; i < hdr->blocks_nr; i++) {
+ /* Skip padding */
+ inbuf = ALIGN(inbuf, IO_BUFFER_ALIGN);
+
+ if (hdr->sizes[i] < 0) {
+ /* Negative sizes indicate uncompressed data blocks */
+ size = abs(hdr->sizes[i]);
+ memcpy((void *)outbuf, (void *)inbuf, size);
+ outbuf += size;
+ inbuf += size;
+ continue;
+ }
+
+ if (!dev)
+ goto sw;
+
+ /*
+ * The better the compression, the more likely the "likely"
+ * case becomes.
+ */
+ if (likely((inbuf & NX842_HW_PAGE_MASK) ==
+ ((inbuf + hdr->sizes[i] - 1) & NX842_HW_PAGE_MASK))) {
+ /* Create direct DDE */
+ op.in = __pa(inbuf);
+ op.inlen = hdr->sizes[i];
+ } else {
+ /* Create indirect DDE (scatterlist) */
+ nx842_build_scatterlist(inbuf, hdr->sizes[i], &slin);
+ op.in = __pa(slin.entries);
+ op.inlen = -nx842_get_scatterlist_size(&slin);
+ }
+
+ /*
+ * NOTE: If the default max_sync_size is changed from 4k
+ * to 64k, remove the "likely" case below, since a
+ * scatterlist will always be needed.
+ */
+ if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) {
+ /* Create direct DDE */
+ op.out = __pa(outbuf);
+ op.outlen = max_sync_size;
+ } else {
+ /* Create indirect DDE (scatterlist) */
+ nx842_build_scatterlist(outbuf, max_sync_size, &slout);
+ op.out = __pa(slout.entries);
+ op.outlen = -nx842_get_scatterlist_size(&slout);
+ }
+
+ /* Send request to pHyp */
+ ret = vio_h_cop_sync(local_devdata->vdev, &op);
+
+ /* Check for pHyp error */
+ if (ret) {
+ dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n",
+ __func__, ret, op.hcall_err);
+ dev = NULL;
+ goto sw;
+ }
+
+ /* Check for hardware error */
+ ret = nx842_validate_result(dev, &csbcpb->csb);
+ if (ret) {
+ dev = NULL;
+ goto sw;
+ }
+
+ /* HW decompression success */
+ inbuf += hdr->sizes[i];
+ outbuf += csbcpb->csb.processed_byte_count;
+ continue;
+
+sw:
+ /* software decompression */
+ size = max_sync_size;
+ ret = sw842_decompress(
+ (unsigned char *)inbuf, hdr->sizes[i],
+ (unsigned char *)outbuf, &size, wmem);
+ if (ret)
+ pr_debug("%s: sw842_decompress failed with %d\n",
+ __func__, ret);
+
+ if (ret) {
+ if (ret != -ENOSPC && ret != -EINVAL &&
+ ret != -EMSGSIZE)
+ ret = -EIO;
+ goto unlock;
+ }
+
+ /* SW decompression success */
+ inbuf += hdr->sizes[i];
+ outbuf += size;
+ }
+
+ *outlen = (unsigned int)(outbuf - (unsigned long)out);
+
+unlock:
+ if (ret)
+ /* decompress fail */
+ nx842_inc_decomp_failed(local_devdata);
+ else {
+ if (!dev)
+ /* software decompress */
+ nx842_inc_swdecomp(local_devdata);
+ nx842_inc_decomp_complete(local_devdata);
+ ibm_nx842_incr_hist(local_devdata->counters->decomp_times,
+ (get_tb() - start_time) / tb_ticks_per_usec);
+ }
+
+ rcu_read_unlock();
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nx842_decompress);
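Taken together, the two entry points round-trip a page as follows. A hedged sketch (nx842_roundtrip is a hypothetical caller; scratch must hold the worst-case compressed output, and page/copy must be page aligned):

    static int nx842_roundtrip(unsigned char *page, unsigned char *scratch,
                               unsigned char *copy, void *wmem)
    {
        unsigned int clen = 2 * PAGE_SIZE;   /* capacity; worst case is incompressible */
        unsigned int dlen = PAGE_SIZE;       /* decompress requires exactly PAGE_SIZE */
        int ret;

        ret = nx842_compress(page, PAGE_SIZE, scratch, &clen, wmem);
        if (ret)
            return ret;
        return nx842_decompress(scratch, clen, copy, &dlen, wmem);
    }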
+
+/**
+ * nx842_OF_set_defaults -- Set default (disabled) values for devdata
+ *
+ * @devdata - struct nx842_devdata to update
+ *
+ * Returns:
+ * 0 on success
+ * -ENOENT if @devdata ptr is NULL
+ */
+static int nx842_OF_set_defaults(struct nx842_devdata *devdata)
+{
+ if (devdata) {
+ devdata->max_sync_size = 0;
+ devdata->max_sync_sg = 0;
+ devdata->max_sg_len = 0;
+ devdata->status = UNAVAILABLE;
+ return 0;
+ } else
+ return -ENOENT;
+}
+
+/**
+ * nx842_OF_upd_status -- Update the device info from OF status prop
+ *
+ * The status property indicates if the accelerator is enabled. If the
+ * device is in the OF tree it indicates that the hardware is present.
+ * The device is enabled when the status property reads 'okay';
+ * otherwise the device driver will be disabled.
+ *
+ * @devdata - struct nx842_devdata to update
+ * @prop - struct property pointer containing the status for the update
+ *
+ * Returns:
+ * 0 - Device is available
+ * -EINVAL - Device is not available
+ */
+static int nx842_OF_upd_status(struct nx842_devdata *devdata,
+ struct property *prop) {
+ int ret = 0;
+ const char *status = (const char *)prop->value;
+
+ if (!strncmp(status, "okay", (size_t)prop->length)) {
+ devdata->status = AVAILABLE;
+ } else {
+ dev_info(devdata->dev, "%s: status '%s' is not 'okay'\n",
+ __func__, status);
+ devdata->status = UNAVAILABLE;
+ }
+
+ return ret;
+}
+
+/**
+ * nx842_OF_upd_maxsglen -- Update the device info from OF maxsglen prop
+ *
+ * Definition of the 'ibm,max-sg-len' OF property:
+ * This field indicates the maximum byte length of a scatter list
+ * for the platform facility. It is a single cell encoded as with encode-int.
+ *
+ * Example:
+ * # od -x ibm,max-sg-len
+ * 0000000 0000 0ff0
+ *
+ * In this example, the maximum byte length of a scatter list is
+ * 0x0ff0 (4,080).
+ *
+ * @devdata - struct nx842_devdata to update
+ * @prop - struct property pointer containing the max-sg-len for the update
+ *
+ * Returns:
+ * 0 on success
+ * -EINVAL on failure
+ */
+static int nx842_OF_upd_maxsglen(struct nx842_devdata *devdata,
+ struct property *prop) {
+ int ret = 0;
+ const int *maxsglen = prop->value;
+
+ if (prop->length != sizeof(*maxsglen)) {
+ dev_err(devdata->dev, "%s: unexpected format for ibm,max-sg-len property\n", __func__);
+ dev_dbg(devdata->dev, "%s: ibm,max-sg-len is %d bytes long, expected %lu bytes\n", __func__,
+ prop->length, sizeof(*maxsglen));
+ ret = -EINVAL;
+ } else {
+ devdata->max_sg_len = (unsigned int)min(*maxsglen,
+ (int)NX842_HW_PAGE_SIZE);
+ }
+
+ return ret;
+}
+
+/**
+ * nx842_OF_upd_maxsyncop -- Update the device info from OF maxsyncop prop
+ *
+ * Definition of the 'ibm,max-sync-cop' OF property:
+ * Two series of cells. The first series of cells represents the maximums
+ * that can be synchronously compressed. The second series of cells
+ * represents the maximums that can be synchronously decompressed.
+ * 1. The first cell in each series contains the count of the number of
+ * data length, scatter list elements pairs that follow – each being
+ * of the form
+ * a. One cell data byte length
+ * b. One cell total number of scatter list elements
+ *
+ * Example:
+ * # od -x ibm,max-sync-cop
+ * 0000000 0000 0001 0000 1000 0000 01fe 0000 0001
+ * 0000020 0000 1000 0000 01fe
+ *
+ * In this example, compression supports 0x1000 (4,096) data byte length
+ * and 0x1fe (510) total scatter list elements. Decompression supports
+ * 0x1000 (4,096) data byte length and 0x1fe (510) total scatter list
+ * elements.
+ *
+ * @devdata - struct nx842_devdata to update
+ * @prop - struct property pointer containing the maxsyncop for the update
+ *
+ * Returns:
+ * 0 on success
+ * -EINVAL on failure
+ */
+static int nx842_OF_upd_maxsyncop(struct nx842_devdata *devdata,
+ struct property *prop) {
+ int ret = 0;
+ const struct maxsynccop_t {
+ int comp_elements;
+ int comp_data_limit;
+ int comp_sg_limit;
+ int decomp_elements;
+ int decomp_data_limit;
+ int decomp_sg_limit;
+ } *maxsynccop;
+
+ if (prop->length != sizeof(*maxsynccop)) {
+ dev_err(devdata->dev, "%s: unexpected format for ibm,max-sync-cop property\n", __func__);
+ dev_dbg(devdata->dev, "%s: ibm,max-sync-cop is %d bytes long, expected %lu bytes\n", __func__, prop->length,
+ sizeof(*maxsynccop));
+ ret = -EINVAL;
+ goto out;
+ }
+
+ maxsynccop = (const struct maxsynccop_t *)prop->value;
+
+ /* Use one limit rather than separate limits for compression and
+ * decompression. Set a maximum for this so as not to exceed the
+ * size that the header can support and round the value down to
+ * the hardware page size (4K) */
+ devdata->max_sync_size =
+ (unsigned int)min(maxsynccop->comp_data_limit,
+ maxsynccop->decomp_data_limit);
+
+ devdata->max_sync_size = min_t(unsigned int, devdata->max_sync_size,
+ SIZE_64K);
+
+ if (devdata->max_sync_size < SIZE_4K) {
+ dev_err(devdata->dev, "%s: hardware max data size (%u) is "
+ "less than the driver minimum, unable to use "
+ "the hardware device\n",
+ __func__, devdata->max_sync_size);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ devdata->max_sync_sg = (unsigned int)min(maxsynccop->comp_sg_limit,
+ maxsynccop->decomp_sg_limit);
+ if (devdata->max_sync_sg < 1) {
+ dev_err(devdata->dev, "%s: hardware max sg size (%u) is "
+ "less than the driver minimum, unable to use "
+ "the hardware device\n",
+ __func__, devdata->max_sync_sg);
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+/**
+ * nx842_OF_upd -- Handle OF properties updates for the device.
+ *
+ * Set all properties from the OF tree. Optionally, a new property
+ * can be provided by the @new_prop pointer to overwrite an existing value.
+ * The device will remain disabled until all values are valid; updates
+ * return an error unless every value is valid.
+ *
+ * @new_prop: If not NULL, this property is being updated. If NULL, update
+ * all properties from the current values in the OF tree.
+ *
+ * Returns:
+ * 0 - Success
+ * -ENOMEM - Could not allocate memory for new devdata structure
+ * -EINVAL - property value not found, new_prop is not a recognized
+ * property for the device or property value is not valid.
+ * -ENODEV - Device is not available
+ */
+static int nx842_OF_upd(struct property *new_prop)
+{
+ struct nx842_devdata *old_devdata = NULL;
+ struct nx842_devdata *new_devdata = NULL;
+ struct device_node *of_node = NULL;
+ struct property *status = NULL;
+ struct property *maxsglen = NULL;
+ struct property *maxsyncop = NULL;
+ int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&devdata_mutex, flags);
+ old_devdata = rcu_dereference_check(devdata,
+ lockdep_is_held(&devdata_mutex));
+ if (old_devdata)
+ of_node = old_devdata->dev->of_node;
+
+ if (!old_devdata || !of_node) {
+ pr_err("%s: device is not available\n", __func__);
+ spin_unlock_irqrestore(&devdata_mutex, flags);
+ return -ENODEV;
+ }
+
+ new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
+ if (!new_devdata) {
+ dev_err(old_devdata->dev, "%s: Could not allocate memory for device data\n", __func__);
+ ret = -ENOMEM;
+ goto error_out;
+ }
+
+ memcpy(new_devdata, old_devdata, sizeof(*old_devdata));
+ new_devdata->counters = old_devdata->counters;
+
+ /* Set ptrs for existing properties */
+ status = of_find_property(of_node, "status", NULL);
+ maxsglen = of_find_property(of_node, "ibm,max-sg-len", NULL);
+ maxsyncop = of_find_property(of_node, "ibm,max-sync-cop", NULL);
+ if (!status || !maxsglen || !maxsyncop) {
+ dev_err(old_devdata->dev, "%s: Could not locate device properties\n", __func__);
+ ret = -EINVAL;
+ goto error_out;
+ }
+
+ /* Set ptr to new property if provided */
+ if (new_prop) {
+ /* Single property */
+ if (!strncmp(new_prop->name, "status", new_prop->length)) {
+ status = new_prop;
+
+ } else if (!strncmp(new_prop->name, "ibm,max-sg-len",
+ new_prop->length)) {
+ maxsglen = new_prop;
+
+ } else if (!strncmp(new_prop->name, "ibm,max-sync-cop",
+ new_prop->length)) {
+ maxsyncop = new_prop;
+
+ } else {
+ /*
+ * Skip the update, the property being updated
+ * has no impact.
+ */
+ goto out;
+ }
+ }
+
+ /* Perform property updates */
+ ret = nx842_OF_upd_status(new_devdata, status);
+ if (ret)
+ goto error_out;
+
+ ret = nx842_OF_upd_maxsglen(new_devdata, maxsglen);
+ if (ret)
+ goto error_out;
+
+ ret = nx842_OF_upd_maxsyncop(new_devdata, maxsyncop);
+ if (ret)
+ goto error_out;
+
+out:
+ dev_info(old_devdata->dev, "%s: max_sync_size new:%u old:%u\n",
+ __func__, new_devdata->max_sync_size,
+ old_devdata->max_sync_size);
+ dev_info(old_devdata->dev, "%s: max_sync_sg new:%u old:%u\n",
+ __func__, new_devdata->max_sync_sg,
+ old_devdata->max_sync_sg);
+ dev_info(old_devdata->dev, "%s: max_sg_len new:%u old:%u\n",
+ __func__, new_devdata->max_sg_len,
+ old_devdata->max_sg_len);
+
+ rcu_assign_pointer(devdata, new_devdata);
+ spin_unlock_irqrestore(&devdata_mutex, flags);
+ synchronize_rcu();
+ dev_set_drvdata(new_devdata->dev, new_devdata);
+ kfree(old_devdata);
+ return 0;
+
+error_out:
+ if (new_devdata) {
+ dev_info(old_devdata->dev, "%s: device disabled\n", __func__);
+ nx842_OF_set_defaults(new_devdata);
+ rcu_assign_pointer(devdata, new_devdata);
+ spin_unlock_irqrestore(&devdata_mutex, flags);
+ synchronize_rcu();
+ dev_set_drvdata(new_devdata->dev, new_devdata);
+ kfree(old_devdata);
+ } else {
+ dev_err(old_devdata->dev, "%s: could not update driver from hardware\n", __func__);
+ spin_unlock_irqrestore(&devdata_mutex, flags);
+ }
+
+ if (!ret)
+ ret = -EINVAL;
+ return ret;
+}
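nx842_OF_upd() is the standard RCU copy-update recipe: copy the live devdata under the lock, modify the copy, publish it, then wait out existing readers before freeing the old version, so the rcu_read_lock() sections in the compress/decompress paths never touch freed memory. The skeleton, reduced to its steps (a sketch; allocation-failure handling omitted):

    spin_lock_irqsave(&devdata_mutex, flags);
    old = rcu_dereference_check(devdata, lockdep_is_held(&devdata_mutex));
    new = kzalloc(sizeof(*new), GFP_NOFS);    /* 1. allocate ...           */
    memcpy(new, old, sizeof(*old));           /*    ... and copy live data */
    /* 2. update fields of the copy here */
    rcu_assign_pointer(devdata, new);         /* 3. publish the new version */
    spin_unlock_irqrestore(&devdata_mutex, flags);
    synchronize_rcu();                        /* 4. wait for old readers   */
    kfree(old);                               /* 5. now safe to free       */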
+
+/**
+ * nx842_OF_notifier - Process updates to OF properties for the device
+ *
+ * @np: notifier block
+ * @action: notifier action
+ * @update: struct pSeries_reconfig_prop_update pointer if action is
+ * PSERIES_UPDATE_PROPERTY
+ *
+ * Returns:
+ * NOTIFY_OK on success
+ * NOTIFY_BAD encoded with error number on failure, use
+ * notifier_to_errno() to decode this value
+ */
+static int nx842_OF_notifier(struct notifier_block *np,
+ unsigned long action,
+ void *update)
+{
+ struct pSeries_reconfig_prop_update *upd;
+ struct nx842_devdata *local_devdata;
+ struct device_node *node = NULL;
+
+ upd = (struct pSeries_reconfig_prop_update *)update;
+
+ rcu_read_lock();
+ local_devdata = rcu_dereference(devdata);
+ if (local_devdata)
+ node = local_devdata->dev->of_node;
+
+ if (local_devdata &&
+ action == PSERIES_UPDATE_PROPERTY &&
+ !strcmp(upd->node->name, node->name)) {
+ rcu_read_unlock();
+ nx842_OF_upd(upd->property);
+ } else
+ rcu_read_unlock();
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block nx842_of_nb = {
+ .notifier_call = nx842_OF_notifier,
+};
+
+#define nx842_counter_read(_name) \
+static ssize_t nx842_##_name##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) { \
+ struct nx842_devdata *local_devdata; \
+ int p = 0; \
+ rcu_read_lock(); \
+ local_devdata = rcu_dereference(devdata); \
+ if (local_devdata) \
+ p = snprintf(buf, PAGE_SIZE, "%ld\n", \
+ atomic64_read(&local_devdata->counters->_name)); \
+ rcu_read_unlock(); \
+ return p; \
+}
+
+#define NX842DEV_COUNTER_ATTR_RO(_name) \
+ nx842_counter_read(_name); \
+ static struct device_attribute dev_attr_##_name = __ATTR(_name, \
+ 0444, \
+ nx842_##_name##_show,\
+ NULL);
+
+NX842DEV_COUNTER_ATTR_RO(comp_complete);
+NX842DEV_COUNTER_ATTR_RO(comp_failed);
+NX842DEV_COUNTER_ATTR_RO(decomp_complete);
+NX842DEV_COUNTER_ATTR_RO(decomp_failed);
+NX842DEV_COUNTER_ATTR_RO(swdecomp);
+
+static ssize_t nx842_timehist_show(struct device *,
+ struct device_attribute *, char *);
+
+static struct device_attribute dev_attr_comp_times = __ATTR(comp_times, 0444,
+ nx842_timehist_show, NULL);
+static struct device_attribute dev_attr_decomp_times = __ATTR(decomp_times,
+ 0444, nx842_timehist_show, NULL);
+
+static ssize_t nx842_timehist_show(struct device *dev,
+ struct device_attribute *attr, char *buf) {
+ char *p = buf;
+ struct nx842_devdata *local_devdata;
+ atomic64_t *times;
+ int bytes_remain = PAGE_SIZE;
+ int bytes;
+ int i;
+
+ rcu_read_lock();
+ local_devdata = rcu_dereference(devdata);
+ if (!local_devdata) {
+ rcu_read_unlock();
+ return 0;
+ }
+
+ if (attr == &dev_attr_comp_times)
+ times = local_devdata->counters->comp_times;
+ else if (attr == &dev_attr_decomp_times)
+ times = local_devdata->counters->decomp_times;
+ else {
+ rcu_read_unlock();
+ return 0;
+ }
+
+ for (i = 0; i < (NX842_HIST_SLOTS - 2); i++) {
+ bytes = snprintf(p, bytes_remain, "%u-%uus:\t%ld\n",
+ i ? (2<<(i-1)) : 0, (2<<i)-1,
+ atomic64_read(&times[i]));
+ bytes_remain -= bytes;
+ p += bytes;
+ }
+ /* The last bucket holds everything over
+ * 2<<(NX842_HIST_SLOTS - 2) us */
+ bytes = snprintf(p, bytes_remain, "%uus - :\t%ld\n",
+ 2<<(NX842_HIST_SLOTS - 2),
+ atomic64_read(&times[(NX842_HIST_SLOTS - 1)]));
+ p += bytes;
+
+ rcu_read_unlock();
+ return p - buf;
+}
+
+static struct attribute *nx842_sysfs_entries[] = {
+ &dev_attr_comp_complete.attr,
+ &dev_attr_comp_failed.attr,
+ &dev_attr_decomp_complete.attr,
+ &dev_attr_decomp_failed.attr,
+ &dev_attr_swdecomp.attr,
+ &dev_attr_comp_times.attr,
+ &dev_attr_decomp_times.attr,
+ NULL,
+};
+
+static struct attribute_group nx842_attribute_group = {
+ .name = NULL, /* put in device directory */
+ .attrs = nx842_sysfs_entries,
+};
+
+static int __init nx842_probe(struct vio_dev *viodev,
+ const struct vio_device_id *id)
+{
+ struct nx842_devdata *old_devdata, *new_devdata = NULL;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&devdata_mutex, flags);
+ old_devdata = rcu_dereference_check(devdata,
+ lockdep_is_held(&devdata_mutex));
+
+ if (old_devdata && old_devdata->vdev != NULL) {
+ dev_err(&viodev->dev, "%s: Attempt to register more than one instance of the hardware\n", __func__);
+ ret = -1;
+ goto error_unlock;
+ }
+
+ dev_set_drvdata(&viodev->dev, NULL);
+
+ new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
+ if (!new_devdata) {
+ dev_err(&viodev->dev, "%s: Could not allocate memory for device data\n", __func__);
+ ret = -ENOMEM;
+ goto error_unlock;
+ }
+
+ new_devdata->counters = kzalloc(sizeof(*new_devdata->counters),
+ GFP_NOFS);
+ if (!new_devdata->counters) {
+ dev_err(&viodev->dev, "%s: Could not allocate memory for performance counters\n", __func__);
+ ret = -ENOMEM;
+ goto error_unlock;
+ }
+
+ new_devdata->vdev = viodev;
+ new_devdata->dev = &viodev->dev;
+ nx842_OF_set_defaults(new_devdata);
+
+ rcu_assign_pointer(devdata, new_devdata);
+ spin_unlock_irqrestore(&devdata_mutex, flags);
+ synchronize_rcu();
+ kfree(old_devdata);
+
+ pSeries_reconfig_notifier_register(&nx842_of_nb);
+
+ ret = nx842_OF_upd(NULL);
+ if (ret && ret != -ENODEV) {
+ dev_err(&viodev->dev, "could not parse device tree. %d\n", ret);
+ ret = -1;
+ goto error;
+ }
+
+ rcu_read_lock();
+ if (dev_set_drvdata(&viodev->dev, rcu_dereference(devdata))) {
+ rcu_read_unlock();
+ dev_err(&viodev->dev, "failed to set driver data for device\n");
+ ret = -1;
+ goto error;
+ }
+ rcu_read_unlock();
+
+ if (sysfs_create_group(&viodev->dev.kobj, &nx842_attribute_group)) {
+ dev_err(&viodev->dev, "could not create sysfs device attributes\n");
+ ret = -1;
+ goto error;
+ }
+
+ return 0;
+
+error_unlock:
+ spin_unlock_irqrestore(&devdata_mutex, flags);
+ if (new_devdata)
+ kfree(new_devdata->counters);
+ kfree(new_devdata);
+error:
+ return ret;
+}
+
+static int __exit nx842_remove(struct vio_dev *viodev)
+{
+ struct nx842_devdata *old_devdata;
+ unsigned long flags;
+
+ pr_info("Removing IBM Power 842 compression device\n");
+ sysfs_remove_group(&viodev->dev.kobj, &nx842_attribute_group);
+
+ spin_lock_irqsave(&devdata_mutex, flags);
+ old_devdata = rcu_dereference_check(devdata,
+ lockdep_is_held(&devdata_mutex));
+ pSeries_reconfig_notifier_unregister(&nx842_of_nb);
+ rcu_assign_pointer(devdata, NULL);
+ spin_unlock_irqrestore(&devdata_mutex, flags);
+ synchronize_rcu();
+ dev_set_drvdata(&viodev->dev, NULL);
+ if (old_devdata)
+ kfree(old_devdata->counters);
+ kfree(old_devdata);
+ return 0;
+}
+
+static struct vio_device_id nx842_driver_ids[] = {
+ {"ibm,compression-v1", "ibm,compression"},
+ {"", ""},
+};
+
+static struct vio_driver nx842_driver = {
+ .name = MODULE_NAME,
+ .probe = nx842_probe,
+ .remove = nx842_remove,
+ .get_desired_dma = nx842_get_desired_dma,
+ .id_table = nx842_driver_ids,
+};
+
+static int __init nx842_init(void)
+{
+ struct nx842_devdata *new_devdata;
+ pr_info("Registering IBM Power 842 compression driver\n");
+
+ RCU_INIT_POINTER(devdata, NULL);
+ new_devdata = kzalloc(sizeof(*new_devdata), GFP_KERNEL);
+ if (!new_devdata) {
+ pr_err("Could not allocate memory for device data\n");
+ return -ENOMEM;
+ }
+ new_devdata->status = UNAVAILABLE;
+ RCU_INIT_POINTER(devdata, new_devdata);
+
+ return vio_register_driver(&nx842_driver);
+}
+
+module_init(nx842_init);
+
+static void __exit nx842_exit(void)
+{
+ struct nx842_devdata *old_devdata;
+ unsigned long flags;
+
+ pr_info("Exiting IBM Power 842 compression driver\n");
+ spin_lock_irqsave(&devdata_mutex, flags);
+ old_devdata = rcu_dereference_check(devdata,
+ lockdep_is_held(&devdata_mutex));
+ rcu_assign_pointer(devdata, NULL);
+ spin_unlock_irqrestore(&devdata_mutex, flags);
+ synchronize_rcu();
+ if (old_devdata)
+ dev_set_drvdata(old_devdata->dev, NULL);
+ kfree(old_devdata);
+ vio_unregister_driver(&nx842_driver);
+}
+
+module_exit(nx842_exit);
+
+/*********************************
+ * 842 software decompressor
+ *********************************/
+typedef int (*sw842_template_op)(const char **, int *, unsigned char **,
+ struct sw842_fifo *);
+
+static int sw842_data8(const char **, int *, unsigned char **,
+ struct sw842_fifo *);
+static int sw842_data4(const char **, int *, unsigned char **,
+ struct sw842_fifo *);
+static int sw842_data2(const char **, int *, unsigned char **,
+ struct sw842_fifo *);
+static int sw842_ptr8(const char **, int *, unsigned char **,
+ struct sw842_fifo *);
+static int sw842_ptr4(const char **, int *, unsigned char **,
+ struct sw842_fifo *);
+static int sw842_ptr2(const char **, int *, unsigned char **,
+ struct sw842_fifo *);
+
+/* special templates */
+#define SW842_TMPL_REPEAT 0x1B
+#define SW842_TMPL_ZEROS 0x1C
+#define SW842_TMPL_EOF 0x1E
+
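+/*
+ * Each 5-bit template value (0-25) selects up to four sub-operations that
+ * together rebuild one 8-byte output chunk from literal data (data8/4/2)
+ * and back-references into the fifos (ptr8/4/2).  Rows with fewer than
+ * four ops are NULL-terminated.
+ */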
+static sw842_template_op sw842_tmpl_ops[26][4] = {
+ { sw842_data8, NULL}, /* 0 (00000) */
+ { sw842_data4, sw842_data2, sw842_ptr2, NULL},
+ { sw842_data4, sw842_ptr2, sw842_data2, NULL},
+ { sw842_data4, sw842_ptr2, sw842_ptr2, NULL},
+ { sw842_data4, sw842_ptr4, NULL},
+ { sw842_data2, sw842_ptr2, sw842_data4, NULL},
+ { sw842_data2, sw842_ptr2, sw842_data2, sw842_ptr2},
+ { sw842_data2, sw842_ptr2, sw842_ptr2, sw842_data2},
+	{ sw842_data2, sw842_ptr2, sw842_ptr2, sw842_ptr2},
+ { sw842_data2, sw842_ptr2, sw842_ptr4, NULL},
+ { sw842_ptr2, sw842_data2, sw842_data4, NULL}, /* 10 (01010) */
+ { sw842_ptr2, sw842_data4, sw842_ptr2, NULL},
+ { sw842_ptr2, sw842_data2, sw842_ptr2, sw842_data2},
+ { sw842_ptr2, sw842_data2, sw842_ptr2, sw842_ptr2},
+ { sw842_ptr2, sw842_data2, sw842_ptr4, NULL},
+ { sw842_ptr2, sw842_ptr2, sw842_data4, NULL},
+ { sw842_ptr2, sw842_ptr2, sw842_data2, sw842_ptr2},
+ { sw842_ptr2, sw842_ptr2, sw842_ptr2, sw842_data2},
+ { sw842_ptr2, sw842_ptr2, sw842_ptr2, sw842_ptr2},
+ { sw842_ptr2, sw842_ptr2, sw842_ptr4, NULL},
+ { sw842_ptr4, sw842_data4, NULL}, /* 20 (10100) */
+ { sw842_ptr4, sw842_data2, sw842_ptr2, NULL},
+ { sw842_ptr4, sw842_ptr2, sw842_data2, NULL},
+ { sw842_ptr4, sw842_ptr2, sw842_ptr2, NULL},
+ { sw842_ptr4, sw842_ptr4, NULL},
+ { sw842_ptr8, NULL}
+};
+
+/* Software decompress helpers */
+
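+/* Extract the byte that starts at bit offset 'bit' within 'buf'. */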
+static uint8_t sw842_get_byte(const char *buf, int bit)
+{
+ uint8_t tmpl;
+ uint16_t tmp;
+ tmp = htons(*(uint16_t *)(buf));
+ tmp = (uint16_t)(tmp << bit);
+ tmp = ntohs(tmp);
+ memcpy(&tmpl, &tmp, 1);
+ return tmpl;
+}
+
+static uint8_t sw842_get_template(const char **buf, int *bit)
+{
+ uint8_t byte;
+ byte = sw842_get_byte(*buf, *bit);
+ byte = byte >> 3;
+ byte &= 0x1F;
+ *buf += (*bit + 5) / 8;
+ *bit = (*bit + 5) % 8;
+ return byte;
+}
+
+/* The repeat count is a 6-bit field, one bit wider than the 5-bit template. */
+static uint8_t sw842_get_repeat_count(const char **buf, int *bit)
+{
+ uint8_t byte;
+ byte = sw842_get_byte(*buf, *bit);
+ byte = byte >> 2;
+ byte &= 0x3F;
+ *buf += (*bit + 6) / 8;
+ *bit = (*bit + 6) % 8;
+ return byte;
+}
+
+static uint8_t sw842_get_ptr2(const char **buf, int *bit)
+{
+ uint8_t ptr;
+ ptr = sw842_get_byte(*buf, *bit);
+ (*buf)++;
+ return ptr;
+}
+
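+/* Extract a 9-bit f4 fifo index starting at bit offset '*bit'. */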
+static uint16_t sw842_get_ptr4(const char **buf, int *bit,
+ struct sw842_fifo *fifo)
+{
+ uint16_t ptr;
+ ptr = htons(*(uint16_t *)(*buf));
+ ptr = (uint16_t)(ptr << *bit);
+ ptr = ptr >> 7;
+ ptr &= 0x01FF;
+ *buf += (*bit + 9) / 8;
+ *bit = (*bit + 9) % 8;
+ return ptr;
+}
+
+static uint8_t sw842_get_ptr8(const char **buf, int *bit,
+ struct sw842_fifo *fifo)
+{
+ return sw842_get_ptr2(buf, bit);
+}
+
+/* Software decompress template ops */
+
+static int sw842_data8(const char **inbuf, int *inbit,
+ unsigned char **outbuf, struct sw842_fifo *fifo)
+{
+ int ret;
+
+ ret = sw842_data4(inbuf, inbit, outbuf, fifo);
+ if (ret)
+ return ret;
+ ret = sw842_data4(inbuf, inbit, outbuf, fifo);
+ return ret;
+}
+
+static int sw842_data4(const char **inbuf, int *inbit,
+ unsigned char **outbuf, struct sw842_fifo *fifo)
+{
+ int ret;
+
+ ret = sw842_data2(inbuf, inbit, outbuf, fifo);
+ if (ret)
+ return ret;
+ ret = sw842_data2(inbuf, inbit, outbuf, fifo);
+ return ret;
+}
+
+static int sw842_data2(const char **inbuf, int *inbit,
+ unsigned char **outbuf, struct sw842_fifo *fifo)
+{
+ **outbuf = sw842_get_byte(*inbuf, *inbit);
+ (*inbuf)++;
+ (*outbuf)++;
+ **outbuf = sw842_get_byte(*inbuf, *inbit);
+ (*inbuf)++;
+ (*outbuf)++;
+ return 0;
+}
+
+static int sw842_ptr8(const char **inbuf, int *inbit,
+ unsigned char **outbuf, struct sw842_fifo *fifo)
+{
+ uint8_t ptr;
+ ptr = sw842_get_ptr8(inbuf, inbit, fifo);
+ if (!fifo->f84_full && (ptr >= fifo->f8_count))
+ return 1;
+ memcpy(*outbuf, fifo->f8[ptr], 8);
+ *outbuf += 8;
+ return 0;
+}
+
+static int sw842_ptr4(const char **inbuf, int *inbit,
+ unsigned char **outbuf, struct sw842_fifo *fifo)
+{
+ uint16_t ptr;
+ ptr = sw842_get_ptr4(inbuf, inbit, fifo);
+ if (!fifo->f84_full && (ptr >= fifo->f4_count))
+ return 1;
+ memcpy(*outbuf, fifo->f4[ptr], 4);
+ *outbuf += 4;
+ return 0;
+}
+
+static int sw842_ptr2(const char **inbuf, int *inbit,
+ unsigned char **outbuf, struct sw842_fifo *fifo)
+{
+ uint8_t ptr;
+ ptr = sw842_get_ptr2(inbuf, inbit);
+ if (!fifo->f2_full && (ptr >= fifo->f2_count))
+ return 1;
+ memcpy(*outbuf, fifo->f2[ptr], 2);
+ *outbuf += 2;
+ return 0;
+}
+
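+/*
+ * Push the latest 8-byte output chunk into all three fifos so that later
+ * ptr2/ptr4/ptr8 templates can reference it.  f2_count is a single byte
+ * on purpose: its wrap-around is what marks the f2 fifo as full.
+ */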
+static void sw842_copy_to_fifo(const char *buf, struct sw842_fifo *fifo)
+{
+ unsigned char initial_f2count = fifo->f2_count;
+
+ memcpy(fifo->f8[fifo->f8_count], buf, 8);
+ fifo->f4_count += 2;
+ fifo->f8_count += 1;
+
+ if (!fifo->f84_full && fifo->f4_count >= 512) {
+ fifo->f84_full = 1;
+ fifo->f4_count /= 512;
+ }
+
+ memcpy(fifo->f2[fifo->f2_count++], buf, 2);
+ memcpy(fifo->f2[fifo->f2_count++], buf + 2, 2);
+ memcpy(fifo->f2[fifo->f2_count++], buf + 4, 2);
+ memcpy(fifo->f2[fifo->f2_count++], buf + 6, 2);
+ if (fifo->f2_count < initial_f2count)
+ fifo->f2_full = 1;
+}
+
+static int sw842_decompress(const unsigned char *src, int srclen,
+ unsigned char *dst, int *destlen,
+ const void *wrkmem)
+{
+ uint8_t tmpl;
+ const char *inbuf;
+ int inbit = 0;
+ unsigned char *outbuf, *outbuf_end, *origbuf, *prevbuf;
+ const char *inbuf_end;
+ sw842_template_op op;
+ int opindex;
+ int i, repeat_count;
+ struct sw842_fifo *fifo;
+ int ret = 0;
+
+ fifo = &((struct nx842_workmem *)(wrkmem))->swfifo;
+ memset(fifo, 0, sizeof(*fifo));
+
+ origbuf = NULL;
+ inbuf = src;
+ inbuf_end = src + srclen;
+ outbuf = dst;
+ outbuf_end = dst + *destlen;
+
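+	/*
+	 * Walk the input template by template until EOF, bounds-checking
+	 * the input and output before each 8-byte chunk is produced.
+	 */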
+ while ((tmpl = sw842_get_template(&inbuf, &inbit)) != SW842_TMPL_EOF) {
+ if (inbuf >= inbuf_end) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ opindex = 0;
+ prevbuf = origbuf;
+ origbuf = outbuf;
+ switch (tmpl) {
+ case SW842_TMPL_REPEAT:
+ if (prevbuf == NULL) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ repeat_count = sw842_get_repeat_count(&inbuf,
+ &inbit) + 1;
+
+ /* Did the repeat count advance past the end of input */
+ if (inbuf > inbuf_end) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ for (i = 0; i < repeat_count; i++) {
+ /* Would this overflow the output buffer */
+ if ((outbuf + 8) > outbuf_end) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ memcpy(outbuf, prevbuf, 8);
+ sw842_copy_to_fifo(outbuf, fifo);
+ outbuf += 8;
+ }
+ break;
+
+ case SW842_TMPL_ZEROS:
+ /* Would this overflow the output buffer */
+ if ((outbuf + 8) > outbuf_end) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ memset(outbuf, 0, 8);
+ sw842_copy_to_fifo(outbuf, fifo);
+ outbuf += 8;
+ break;
+
+ default:
+ if (tmpl > 25) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Does this go past the end of the input buffer */
+ if ((inbuf + 2) > inbuf_end) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Would this overflow the output buffer */
+ if ((outbuf + 8) > outbuf_end) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ while (opindex < 4 &&
+ (op = sw842_tmpl_ops[tmpl][opindex++])
+ != NULL) {
+ ret = (*op)(&inbuf, &inbit, &outbuf, fifo);
+ if (ret) {
+ ret = -EINVAL;
+ goto out;
+ }
+ sw842_copy_to_fifo(origbuf, fifo);
+ }
+ }
+ }
+
+out:
+ if (!ret)
+ *destlen = (unsigned int)(outbuf - dst);
+ else
+ *destlen = 0;
+
+ return ret;
+}
diff --git a/drivers/crypto/nx/nx-aes-cbc.c b/drivers/crypto/nx/nx-aes-cbc.c
index 69ed796ee327..a76d4c4f29f5 100644
--- a/drivers/crypto/nx/nx-aes-cbc.c
+++ b/drivers/crypto/nx/nx-aes-cbc.c
@@ -127,7 +127,6 @@ struct crypto_alg nx_cbc_aes_alg = {
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(nx_cbc_aes_alg.cra_list),
.cra_init = nx_crypto_ctx_aes_cbc_init,
.cra_exit = nx_crypto_ctx_exit,
.cra_blkcipher = {
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
index 7aeac678b9c0..ef5eae6d1400 100644
--- a/drivers/crypto/nx/nx-aes-ccm.c
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -430,7 +430,6 @@ struct crypto_alg nx_ccm_aes_alg = {
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
.cra_type = &crypto_aead_type,
.cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(nx_ccm_aes_alg.cra_list),
.cra_init = nx_crypto_ctx_aes_ccm_init,
.cra_exit = nx_crypto_ctx_exit,
.cra_aead = {
@@ -453,7 +452,6 @@ struct crypto_alg nx_ccm4309_aes_alg = {
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
.cra_type = &crypto_nivaead_type,
.cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(nx_ccm4309_aes_alg.cra_list),
.cra_init = nx_crypto_ctx_aes_ccm_init,
.cra_exit = nx_crypto_ctx_exit,
.cra_aead = {
diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c
index 52d4eb05e8f7..b6286f14680b 100644
--- a/drivers/crypto/nx/nx-aes-ctr.c
+++ b/drivers/crypto/nx/nx-aes-ctr.c
@@ -141,7 +141,6 @@ struct crypto_alg nx_ctr_aes_alg = {
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(nx_ctr_aes_alg.cra_list),
.cra_init = nx_crypto_ctx_aes_ctr_init,
.cra_exit = nx_crypto_ctx_exit,
.cra_blkcipher = {
@@ -163,7 +162,6 @@ struct crypto_alg nx_ctr3686_aes_alg = {
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(nx_ctr3686_aes_alg.cra_list),
.cra_init = nx_crypto_ctx_aes_ctr_init,
.cra_exit = nx_crypto_ctx_exit,
.cra_blkcipher = {
diff --git a/drivers/crypto/nx/nx-aes-ecb.c b/drivers/crypto/nx/nx-aes-ecb.c
index 7b77bc2d1df4..ba5f1611336f 100644
--- a/drivers/crypto/nx/nx-aes-ecb.c
+++ b/drivers/crypto/nx/nx-aes-ecb.c
@@ -126,7 +126,6 @@ struct crypto_alg nx_ecb_aes_alg = {
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(nx_ecb_aes_alg.cra_list),
.cra_init = nx_crypto_ctx_aes_ecb_init,
.cra_exit = nx_crypto_ctx_exit,
.cra_blkcipher = {
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
index 9ab1c7341dac..c8109edc5cfb 100644
--- a/drivers/crypto/nx/nx-aes-gcm.c
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -316,7 +316,6 @@ struct crypto_alg nx_gcm_aes_alg = {
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
.cra_type = &crypto_aead_type,
.cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(nx_gcm_aes_alg.cra_list),
.cra_init = nx_crypto_ctx_aes_gcm_init,
.cra_exit = nx_crypto_ctx_exit,
.cra_aead = {
@@ -338,7 +337,6 @@ struct crypto_alg nx_gcm4106_aes_alg = {
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
.cra_type = &crypto_nivaead_type,
.cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(nx_gcm4106_aes_alg.cra_list),
.cra_init = nx_crypto_ctx_aes_gcm_init,
.cra_exit = nx_crypto_ctx_exit,
.cra_aead = {
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 63e57b57a12c..093a8af59cbe 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -876,7 +876,6 @@ static int omap_aes_probe(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(algs); i++) {
pr_debug("i: %d\n", i);
- INIT_LIST_HEAD(&algs[i].cra_list);
err = crypto_register_alg(&algs[i]);
if (err)
goto err_algs;
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 37b2e9406af6..633ba945e153 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -328,7 +328,6 @@ static struct crypto_alg aes_alg = {
.cra_ctxsize = sizeof(struct aes_ctx),
.cra_alignmask = PADLOCK_ALIGNMENT - 1,
.cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(aes_alg.cra_list),
.cra_u = {
.cipher = {
.cia_min_keysize = AES_MIN_KEY_SIZE,
@@ -408,7 +407,6 @@ static struct crypto_alg ecb_aes_alg = {
.cra_alignmask = PADLOCK_ALIGNMENT - 1,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(ecb_aes_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
@@ -491,7 +489,6 @@ static struct crypto_alg cbc_aes_alg = {
.cra_alignmask = PADLOCK_ALIGNMENT - 1,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(cbc_aes_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index bc986f806086..a22714412cda 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -626,7 +626,6 @@ static int s5p_aes_probe(struct platform_device *pdev)
crypto_init_queue(&pdata->queue, CRYPTO_QUEUE_LEN);
for (i = 0; i < ARRAY_SIZE(algs); i++) {
- INIT_LIST_HEAD(&algs[i].cra_list);
err = crypto_register_alg(&algs[i]);
if (err)
goto err_algs;
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index efff788d2f1d..da1112765a44 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -38,6 +38,7 @@
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
@@ -714,8 +715,13 @@ badkey:
/*
* talitos_edesc - s/w-extended descriptor
+ * @assoc_nents: number of segments in associated data scatterlist
* @src_nents: number of segments in input scatterlist
* @dst_nents: number of segments in output scatterlist
+ * @assoc_chained: whether assoc is chained or not
+ * @src_chained: whether src is chained or not
+ * @dst_chained: whether dst is chained or not
+ * @iv_dma: dma address of iv for checking continuity and link table
* @dma_len: length of dma mapped link_tbl space
* @dma_link_tbl: bus physical address of link_tbl
* @desc: h/w descriptor
@@ -726,10 +732,13 @@ badkey:
* of link_tbl data
*/
struct talitos_edesc {
+ int assoc_nents;
int src_nents;
int dst_nents;
- int src_is_chained;
- int dst_is_chained;
+ bool assoc_chained;
+ bool src_chained;
+ bool dst_chained;
+ dma_addr_t iv_dma;
int dma_len;
dma_addr_t dma_link_tbl;
struct talitos_desc desc;
@@ -738,7 +747,7 @@ struct talitos_edesc {
static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
unsigned int nents, enum dma_data_direction dir,
- int chained)
+ bool chained)
{
if (unlikely(chained))
while (sg) {
@@ -768,13 +777,13 @@ static void talitos_sg_unmap(struct device *dev,
unsigned int dst_nents = edesc->dst_nents ? : 1;
if (src != dst) {
- if (edesc->src_is_chained)
+ if (edesc->src_chained)
talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE);
else
dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
if (dst) {
- if (edesc->dst_is_chained)
+ if (edesc->dst_chained)
talitos_unmap_sg_chain(dev, dst,
DMA_FROM_DEVICE);
else
@@ -782,7 +791,7 @@ static void talitos_sg_unmap(struct device *dev,
DMA_FROM_DEVICE);
}
} else
- if (edesc->src_is_chained)
+ if (edesc->src_chained)
talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
else
dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
@@ -797,7 +806,13 @@ static void ipsec_esp_unmap(struct device *dev,
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);
- dma_unmap_sg(dev, areq->assoc, 1, DMA_TO_DEVICE);
+ if (edesc->assoc_chained)
+ talitos_unmap_sg_chain(dev, areq->assoc, DMA_TO_DEVICE);
+ else
+		/* assoc_nents also counts the IV entry in the non-contiguous case */
+ dma_unmap_sg(dev, areq->assoc,
+ edesc->assoc_nents ? edesc->assoc_nents - 1 : 1,
+ DMA_TO_DEVICE);
talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
@@ -825,9 +840,10 @@ static void ipsec_esp_encrypt_done(struct device *dev,
ipsec_esp_unmap(dev, edesc, areq);
/* copy the generated ICV to dst */
- if (edesc->dma_len) {
+ if (edesc->dst_nents) {
icvdata = &edesc->link_tbl[edesc->src_nents +
- edesc->dst_nents + 2];
+ edesc->dst_nents + 2 +
+ edesc->assoc_nents];
sg = sg_last(areq->dst, edesc->dst_nents);
memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize,
icvdata, ctx->authsize);
@@ -857,7 +873,8 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
/* auth check */
if (edesc->dma_len)
icvdata = &edesc->link_tbl[edesc->src_nents +
- edesc->dst_nents + 2];
+ edesc->dst_nents + 2 +
+ edesc->assoc_nents];
else
icvdata = &edesc->link_tbl[0];
@@ -932,10 +949,9 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
* fill in and submit ipsec_esp descriptor
*/
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
- u8 *giv, u64 seq,
- void (*callback) (struct device *dev,
- struct talitos_desc *desc,
- void *context, int error))
+ u64 seq, void (*callback) (struct device *dev,
+ struct talitos_desc *desc,
+ void *context, int error))
{
struct crypto_aead *aead = crypto_aead_reqtfm(areq);
struct talitos_ctx *ctx = crypto_aead_ctx(aead);
@@ -950,12 +966,42 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
/* hmac key */
map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
0, DMA_TO_DEVICE);
+
/* hmac data */
- map_single_talitos_ptr(dev, &desc->ptr[1], areq->assoclen + ivsize,
- sg_virt(areq->assoc), 0, DMA_TO_DEVICE);
+ desc->ptr[1].len = cpu_to_be16(areq->assoclen + ivsize);
+ if (edesc->assoc_nents) {
+ int tbl_off = edesc->src_nents + edesc->dst_nents + 2;
+ struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
+
+ to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
+ sizeof(struct talitos_ptr));
+ desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;
+
+ /* assoc_nents - 1 entries for assoc, 1 for IV */
+ sg_count = sg_to_link_tbl(areq->assoc, edesc->assoc_nents - 1,
+ areq->assoclen, tbl_ptr);
+
+ /* add IV to link table */
+ tbl_ptr += sg_count - 1;
+ tbl_ptr->j_extent = 0;
+ tbl_ptr++;
+ to_talitos_ptr(tbl_ptr, edesc->iv_dma);
+ tbl_ptr->len = cpu_to_be16(ivsize);
+ tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
+
+ dma_sync_single_for_device(dev, edesc->dma_link_tbl,
+ edesc->dma_len, DMA_BIDIRECTIONAL);
+ } else {
+ to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->assoc));
+ desc->ptr[1].j_extent = 0;
+ }
+
/* cipher iv */
- map_single_talitos_ptr(dev, &desc->ptr[2], ivsize, giv ?: areq->iv, 0,
- DMA_TO_DEVICE);
+ to_talitos_ptr(&desc->ptr[2], edesc->iv_dma);
+ desc->ptr[2].len = cpu_to_be16(ivsize);
+ desc->ptr[2].j_extent = 0;
+ /* Sync needed for the aead_givencrypt case */
+ dma_sync_single_for_device(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
/* cipher key */
map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
@@ -974,7 +1020,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
(areq->src == areq->dst) ? DMA_BIDIRECTIONAL
: DMA_TO_DEVICE,
- edesc->src_is_chained);
+ edesc->src_chained);
if (sg_count == 1) {
to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src));
@@ -1006,32 +1052,30 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
if (areq->src != areq->dst)
sg_count = talitos_map_sg(dev, areq->dst,
edesc->dst_nents ? : 1,
- DMA_FROM_DEVICE,
- edesc->dst_is_chained);
+ DMA_FROM_DEVICE, edesc->dst_chained);
if (sg_count == 1) {
to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst));
} else {
- struct talitos_ptr *link_tbl_ptr =
- &edesc->link_tbl[edesc->src_nents + 1];
+ int tbl_off = edesc->src_nents + 1;
+ struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
- (edesc->src_nents + 1) *
- sizeof(struct talitos_ptr));
+ tbl_off * sizeof(struct talitos_ptr));
sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
- link_tbl_ptr);
+ tbl_ptr);
/* Add an entry to the link table for ICV data */
- link_tbl_ptr += sg_count - 1;
- link_tbl_ptr->j_extent = 0;
- sg_count++;
- link_tbl_ptr++;
- link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
- link_tbl_ptr->len = cpu_to_be16(authsize);
+ tbl_ptr += sg_count - 1;
+ tbl_ptr->j_extent = 0;
+ tbl_ptr++;
+ tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
+ tbl_ptr->len = cpu_to_be16(authsize);
/* icv data follows link tables */
- to_talitos_ptr(link_tbl_ptr, edesc->dma_link_tbl +
- (edesc->src_nents + edesc->dst_nents + 2) *
+ to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl +
+ (tbl_off + edesc->dst_nents + 1 +
+ edesc->assoc_nents) *
sizeof(struct talitos_ptr));
desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
@@ -1053,17 +1097,17 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
/*
* derive number of elements in scatterlist
*/
-static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained)
+static int sg_count(struct scatterlist *sg_list, int nbytes, bool *chained)
{
struct scatterlist *sg = sg_list;
int sg_nents = 0;
- *chained = 0;
+ *chained = false;
while (nbytes > 0) {
sg_nents++;
nbytes -= sg->length;
if (!sg_is_last(sg) && (sg + 1)->length == 0)
- *chained = 1;
+ *chained = true;
sg = scatterwalk_sg_next(sg);
}
@@ -1132,17 +1176,21 @@ static size_t sg_copy_end_to_buffer(struct scatterlist *sgl, unsigned int nents,
* allocate and map the extended descriptor
*/
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
+ struct scatterlist *assoc,
struct scatterlist *src,
struct scatterlist *dst,
- int hash_result,
+ u8 *iv,
+ unsigned int assoclen,
unsigned int cryptlen,
unsigned int authsize,
+ unsigned int ivsize,
int icv_stashing,
u32 cryptoflags)
{
struct talitos_edesc *edesc;
- int src_nents, dst_nents, alloc_len, dma_len;
- int src_chained, dst_chained = 0;
+ int assoc_nents = 0, src_nents, dst_nents, alloc_len, dma_len;
+ bool assoc_chained = false, src_chained = false, dst_chained = false;
+ dma_addr_t iv_dma = 0;
gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
@@ -1151,10 +1199,29 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
return ERR_PTR(-EINVAL);
}
+ if (iv)
+ iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
+
+ if (assoc) {
+ /*
+ * Currently it is assumed that iv is provided whenever assoc
+ * is.
+ */
+ BUG_ON(!iv);
+
+ assoc_nents = sg_count(assoc, assoclen, &assoc_chained);
+ talitos_map_sg(dev, assoc, assoc_nents, DMA_TO_DEVICE,
+ assoc_chained);
+ assoc_nents = (assoc_nents == 1) ? 0 : assoc_nents;
+
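+		/*
+		 * If assoc is a single segment and the IV immediately
+		 * follows it in DMA space, both can be handed to the h/w
+		 * as one flat pointer (assoc_nents == 0); otherwise a link
+		 * table with an extra entry for the IV is needed.
+		 */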
+ if (assoc_nents || sg_dma_address(assoc) + assoclen != iv_dma)
+ assoc_nents = assoc_nents ? assoc_nents + 1 : 2;
+ }
+
src_nents = sg_count(src, cryptlen + authsize, &src_chained);
src_nents = (src_nents == 1) ? 0 : src_nents;
- if (hash_result) {
+ if (!dst) {
dst_nents = 0;
} else {
if (dst == src) {
@@ -1172,9 +1239,9 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
* and the ICV data itself
*/
alloc_len = sizeof(struct talitos_edesc);
- if (src_nents || dst_nents) {
- dma_len = (src_nents + dst_nents + 2) *
- sizeof(struct talitos_ptr) + authsize;
+ if (assoc_nents || src_nents || dst_nents) {
+ dma_len = (src_nents + dst_nents + 2 + assoc_nents) *
+ sizeof(struct talitos_ptr) + authsize;
alloc_len += dma_len;
} else {
dma_len = 0;
@@ -1183,14 +1250,20 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
edesc = kmalloc(alloc_len, GFP_DMA | flags);
if (!edesc) {
+ talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE);
+ if (iv_dma)
+ dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
dev_err(dev, "could not allocate edescriptor\n");
return ERR_PTR(-ENOMEM);
}
+ edesc->assoc_nents = assoc_nents;
edesc->src_nents = src_nents;
edesc->dst_nents = dst_nents;
- edesc->src_is_chained = src_chained;
- edesc->dst_is_chained = dst_chained;
+ edesc->assoc_chained = assoc_chained;
+ edesc->src_chained = src_chained;
+ edesc->dst_chained = dst_chained;
+ edesc->iv_dma = iv_dma;
edesc->dma_len = dma_len;
if (dma_len)
edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
@@ -1200,14 +1273,16 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
return edesc;
}
-static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq,
+static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
int icv_stashing)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
+ unsigned int ivsize = crypto_aead_ivsize(authenc);
- return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, 0,
- areq->cryptlen, ctx->authsize, icv_stashing,
+ return talitos_edesc_alloc(ctx->dev, areq->assoc, areq->src, areq->dst,
+ iv, areq->assoclen, areq->cryptlen,
+ ctx->authsize, ivsize, icv_stashing,
areq->base.flags);
}
@@ -1218,14 +1293,14 @@ static int aead_encrypt(struct aead_request *req)
struct talitos_edesc *edesc;
/* allocate extended descriptor */
- edesc = aead_edesc_alloc(req, 0);
+ edesc = aead_edesc_alloc(req, req->iv, 0);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
/* set encrypt */
edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
- return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_encrypt_done);
+ return ipsec_esp(edesc, req, 0, ipsec_esp_encrypt_done);
}
static int aead_decrypt(struct aead_request *req)
@@ -1241,7 +1316,7 @@ static int aead_decrypt(struct aead_request *req)
req->cryptlen -= authsize;
/* allocate extended descriptor */
- edesc = aead_edesc_alloc(req, 1);
+ edesc = aead_edesc_alloc(req, req->iv, 1);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@@ -1257,9 +1332,7 @@ static int aead_decrypt(struct aead_request *req)
/* reset integrity check result bits */
edesc->desc.hdr_lo = 0;
- return ipsec_esp(edesc, req, NULL, 0,
- ipsec_esp_decrypt_hwauth_done);
-
+ return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_hwauth_done);
}
/* Have to check the ICV with software */
@@ -1268,7 +1341,8 @@ static int aead_decrypt(struct aead_request *req)
/* stash incoming ICV for later cmp with ICV generated by the h/w */
if (edesc->dma_len)
icvdata = &edesc->link_tbl[edesc->src_nents +
- edesc->dst_nents + 2];
+ edesc->dst_nents + 2 +
+ edesc->assoc_nents];
else
icvdata = &edesc->link_tbl[0];
@@ -1277,7 +1351,7 @@ static int aead_decrypt(struct aead_request *req)
memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
ctx->authsize);
- return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_swauth_done);
+ return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_swauth_done);
}
static int aead_givencrypt(struct aead_givcrypt_request *req)
@@ -1288,7 +1362,7 @@ static int aead_givencrypt(struct aead_givcrypt_request *req)
struct talitos_edesc *edesc;
/* allocate extended descriptor */
- edesc = aead_edesc_alloc(areq, 0);
+ edesc = aead_edesc_alloc(areq, req->giv, 0);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@@ -1299,8 +1373,7 @@ static int aead_givencrypt(struct aead_givcrypt_request *req)
/* avoid consecutive packets going out with same IV */
*(__be64 *)req->giv ^= cpu_to_be64(req->seq);
- return ipsec_esp(edesc, areq, req->giv, req->seq,
- ipsec_esp_encrypt_done);
+ return ipsec_esp(edesc, areq, req->seq, ipsec_esp_encrypt_done);
}
static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
@@ -1356,7 +1429,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
struct device *dev = ctx->dev;
struct talitos_desc *desc = &edesc->desc;
unsigned int cryptlen = areq->nbytes;
- unsigned int ivsize;
+ unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
int sg_count, ret;
/* first DWORD empty */
@@ -1365,9 +1438,9 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
desc->ptr[0].j_extent = 0;
/* cipher iv */
- ivsize = crypto_ablkcipher_ivsize(cipher);
- map_single_talitos_ptr(dev, &desc->ptr[1], ivsize, areq->info, 0,
- DMA_TO_DEVICE);
+ to_talitos_ptr(&desc->ptr[1], edesc->iv_dma);
+ desc->ptr[1].len = cpu_to_be16(ivsize);
+ desc->ptr[1].j_extent = 0;
/* cipher key */
map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
@@ -1382,7 +1455,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
(areq->src == areq->dst) ? DMA_BIDIRECTIONAL
: DMA_TO_DEVICE,
- edesc->src_is_chained);
+ edesc->src_chained);
if (sg_count == 1) {
to_talitos_ptr(&desc->ptr[3], sg_dma_address(areq->src));
@@ -1409,8 +1482,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
if (areq->src != areq->dst)
sg_count = talitos_map_sg(dev, areq->dst,
edesc->dst_nents ? : 1,
- DMA_FROM_DEVICE,
- edesc->dst_is_chained);
+ DMA_FROM_DEVICE, edesc->dst_chained);
if (sg_count == 1) {
to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->dst));
@@ -1450,9 +1522,11 @@ static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
{
struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
- return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, 0,
- areq->nbytes, 0, 0, areq->base.flags);
+ return talitos_edesc_alloc(ctx->dev, NULL, areq->src, areq->dst,
+ areq->info, 0, areq->nbytes, 0, ivsize, 0,
+ areq->base.flags);
}
static int ablkcipher_encrypt(struct ablkcipher_request *areq)
@@ -1578,8 +1652,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
sg_count = talitos_map_sg(dev, req_ctx->psrc,
edesc->src_nents ? : 1,
- DMA_TO_DEVICE,
- edesc->src_is_chained);
+ DMA_TO_DEVICE, edesc->src_chained);
if (sg_count == 1) {
to_talitos_ptr(&desc->ptr[3], sg_dma_address(req_ctx->psrc));
@@ -1631,8 +1704,8 @@ static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
- return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, 1,
- nbytes, 0, 0, areq->base.flags);
+ return talitos_edesc_alloc(ctx->dev, NULL, req_ctx->psrc, NULL, NULL, 0,
+ nbytes, 0, 0, 0, areq->base.flags);
}
static int ahash_init(struct ahash_request *areq)
@@ -1690,7 +1763,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
unsigned int nbytes_to_hash;
unsigned int to_hash_later;
unsigned int nsg;
- int chained;
+ bool chained;
if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
/* Buffer up to one whole block */
@@ -1902,21 +1975,18 @@ struct talitos_alg_template {
};
static struct talitos_alg_template driver_algs[] = {
- /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
+ /*
+ * AEAD algorithms. These use a single-pass ipsec_esp descriptor.
+	 * An authencesn(*,*) variant of each is also registered, although
+	 * it is not listed explicitly here.
+ */
{ .type = CRYPTO_ALG_TYPE_AEAD,
.alg.crypto = {
.cra_name = "authenc(hmac(sha1),cbc(aes))",
.cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_type = &crypto_aead_type,
.cra_aead = {
- .setkey = aead_setkey,
- .setauthsize = aead_setauthsize,
- .encrypt = aead_encrypt,
- .decrypt = aead_decrypt,
- .givencrypt = aead_givencrypt,
- .geniv = "<built-in>",
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
}
@@ -1935,14 +2005,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_type = &crypto_aead_type,
.cra_aead = {
- .setkey = aead_setkey,
- .setauthsize = aead_setauthsize,
- .encrypt = aead_encrypt,
- .decrypt = aead_decrypt,
- .givencrypt = aead_givencrypt,
- .geniv = "<built-in>",
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
}
@@ -1962,14 +2025,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha224-cbc-aes-talitos",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_type = &crypto_aead_type,
.cra_aead = {
- .setkey = aead_setkey,
- .setauthsize = aead_setauthsize,
- .encrypt = aead_encrypt,
- .decrypt = aead_decrypt,
- .givencrypt = aead_givencrypt,
- .geniv = "<built-in>",
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
}
@@ -1988,14 +2044,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha224-cbc-3des-talitos",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_type = &crypto_aead_type,
.cra_aead = {
- .setkey = aead_setkey,
- .setauthsize = aead_setauthsize,
- .encrypt = aead_encrypt,
- .decrypt = aead_decrypt,
- .givencrypt = aead_givencrypt,
- .geniv = "<built-in>",
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
}
@@ -2015,14 +2064,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_type = &crypto_aead_type,
.cra_aead = {
- .setkey = aead_setkey,
- .setauthsize = aead_setauthsize,
- .encrypt = aead_encrypt,
- .decrypt = aead_decrypt,
- .givencrypt = aead_givencrypt,
- .geniv = "<built-in>",
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
}
@@ -2041,14 +2083,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_type = &crypto_aead_type,
.cra_aead = {
- .setkey = aead_setkey,
- .setauthsize = aead_setauthsize,
- .encrypt = aead_encrypt,
- .decrypt = aead_decrypt,
- .givencrypt = aead_givencrypt,
- .geniv = "<built-in>",
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
}
@@ -2068,14 +2103,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha384-cbc-aes-talitos",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_type = &crypto_aead_type,
.cra_aead = {
- .setkey = aead_setkey,
- .setauthsize = aead_setauthsize,
- .encrypt = aead_encrypt,
- .decrypt = aead_decrypt,
- .givencrypt = aead_givencrypt,
- .geniv = "<built-in>",
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
}
@@ -2094,14 +2122,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha384-cbc-3des-talitos",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_type = &crypto_aead_type,
.cra_aead = {
- .setkey = aead_setkey,
- .setauthsize = aead_setauthsize,
- .encrypt = aead_encrypt,
- .decrypt = aead_decrypt,
- .givencrypt = aead_givencrypt,
- .geniv = "<built-in>",
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
}
@@ -2121,14 +2142,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha512-cbc-aes-talitos",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_type = &crypto_aead_type,
.cra_aead = {
- .setkey = aead_setkey,
- .setauthsize = aead_setauthsize,
- .encrypt = aead_encrypt,
- .decrypt = aead_decrypt,
- .givencrypt = aead_givencrypt,
- .geniv = "<built-in>",
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
}
@@ -2147,14 +2161,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha512-cbc-3des-talitos",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_type = &crypto_aead_type,
.cra_aead = {
- .setkey = aead_setkey,
- .setauthsize = aead_setauthsize,
- .encrypt = aead_encrypt,
- .decrypt = aead_decrypt,
- .givencrypt = aead_givencrypt,
- .geniv = "<built-in>",
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
}
@@ -2174,14 +2181,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_type = &crypto_aead_type,
.cra_aead = {
- .setkey = aead_setkey,
- .setauthsize = aead_setauthsize,
- .encrypt = aead_encrypt,
- .decrypt = aead_decrypt,
- .givencrypt = aead_givencrypt,
- .geniv = "<built-in>",
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
}
@@ -2200,14 +2200,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_type = &crypto_aead_type,
.cra_aead = {
- .setkey = aead_setkey,
- .setauthsize = aead_setauthsize,
- .encrypt = aead_encrypt,
- .decrypt = aead_decrypt,
- .givencrypt = aead_givencrypt,
- .geniv = "<built-in>",
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
}
@@ -2229,12 +2222,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
CRYPTO_ALG_ASYNC,
- .cra_type = &crypto_ablkcipher_type,
.cra_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .geniv = "eseqiv",
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
@@ -2251,12 +2239,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
CRYPTO_ALG_ASYNC,
- .cra_type = &crypto_ablkcipher_type,
.cra_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .geniv = "eseqiv",
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
@@ -2270,11 +2253,6 @@ static struct talitos_alg_template driver_algs[] = {
/* AHASH algorithms. */
{ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
- .init = ahash_init,
- .update = ahash_update,
- .final = ahash_final,
- .finup = ahash_finup,
- .digest = ahash_digest,
.halg.digestsize = MD5_DIGEST_SIZE,
.halg.base = {
.cra_name = "md5",
@@ -2282,7 +2260,6 @@ static struct talitos_alg_template driver_algs[] = {
.cra_blocksize = MD5_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC,
- .cra_type = &crypto_ahash_type
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2291,11 +2268,6 @@ static struct talitos_alg_template driver_algs[] = {
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
- .init = ahash_init,
- .update = ahash_update,
- .final = ahash_final,
- .finup = ahash_finup,
- .digest = ahash_digest,
.halg.digestsize = SHA1_DIGEST_SIZE,
.halg.base = {
.cra_name = "sha1",
@@ -2303,7 +2275,6 @@ static struct talitos_alg_template driver_algs[] = {
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC,
- .cra_type = &crypto_ahash_type
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2312,11 +2283,6 @@ static struct talitos_alg_template driver_algs[] = {
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
- .init = ahash_init,
- .update = ahash_update,
- .final = ahash_final,
- .finup = ahash_finup,
- .digest = ahash_digest,
.halg.digestsize = SHA224_DIGEST_SIZE,
.halg.base = {
.cra_name = "sha224",
@@ -2324,7 +2290,6 @@ static struct talitos_alg_template driver_algs[] = {
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC,
- .cra_type = &crypto_ahash_type
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2333,11 +2298,6 @@ static struct talitos_alg_template driver_algs[] = {
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
- .init = ahash_init,
- .update = ahash_update,
- .final = ahash_final,
- .finup = ahash_finup,
- .digest = ahash_digest,
.halg.digestsize = SHA256_DIGEST_SIZE,
.halg.base = {
.cra_name = "sha256",
@@ -2345,7 +2305,6 @@ static struct talitos_alg_template driver_algs[] = {
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC,
- .cra_type = &crypto_ahash_type
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2354,11 +2313,6 @@ static struct talitos_alg_template driver_algs[] = {
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
- .init = ahash_init,
- .update = ahash_update,
- .final = ahash_final,
- .finup = ahash_finup,
- .digest = ahash_digest,
.halg.digestsize = SHA384_DIGEST_SIZE,
.halg.base = {
.cra_name = "sha384",
@@ -2366,7 +2320,6 @@ static struct talitos_alg_template driver_algs[] = {
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC,
- .cra_type = &crypto_ahash_type
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2375,11 +2328,6 @@ static struct talitos_alg_template driver_algs[] = {
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
- .init = ahash_init,
- .update = ahash_update,
- .final = ahash_final,
- .finup = ahash_finup,
- .digest = ahash_digest,
.halg.digestsize = SHA512_DIGEST_SIZE,
.halg.base = {
.cra_name = "sha512",
@@ -2387,7 +2335,6 @@ static struct talitos_alg_template driver_algs[] = {
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC,
- .cra_type = &crypto_ahash_type
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2396,12 +2343,6 @@ static struct talitos_alg_template driver_algs[] = {
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
- .init = ahash_init,
- .update = ahash_update,
- .final = ahash_final,
- .finup = ahash_finup,
- .digest = ahash_digest,
- .setkey = ahash_setkey,
.halg.digestsize = MD5_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(md5)",
@@ -2409,7 +2350,6 @@ static struct talitos_alg_template driver_algs[] = {
.cra_blocksize = MD5_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC,
- .cra_type = &crypto_ahash_type
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2418,12 +2358,6 @@ static struct talitos_alg_template driver_algs[] = {
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
- .init = ahash_init,
- .update = ahash_update,
- .final = ahash_final,
- .finup = ahash_finup,
- .digest = ahash_digest,
- .setkey = ahash_setkey,
.halg.digestsize = SHA1_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha1)",
@@ -2431,7 +2365,6 @@ static struct talitos_alg_template driver_algs[] = {
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC,
- .cra_type = &crypto_ahash_type
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2440,12 +2373,6 @@ static struct talitos_alg_template driver_algs[] = {
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
- .init = ahash_init,
- .update = ahash_update,
- .final = ahash_final,
- .finup = ahash_finup,
- .digest = ahash_digest,
- .setkey = ahash_setkey,
.halg.digestsize = SHA224_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha224)",
@@ -2453,7 +2380,6 @@ static struct talitos_alg_template driver_algs[] = {
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC,
- .cra_type = &crypto_ahash_type
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2462,12 +2388,6 @@ static struct talitos_alg_template driver_algs[] = {
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
- .init = ahash_init,
- .update = ahash_update,
- .final = ahash_final,
- .finup = ahash_finup,
- .digest = ahash_digest,
- .setkey = ahash_setkey,
.halg.digestsize = SHA256_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha256)",
@@ -2475,7 +2395,6 @@ static struct talitos_alg_template driver_algs[] = {
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC,
- .cra_type = &crypto_ahash_type
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2484,12 +2403,6 @@ static struct talitos_alg_template driver_algs[] = {
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
- .init = ahash_init,
- .update = ahash_update,
- .final = ahash_final,
- .finup = ahash_finup,
- .digest = ahash_digest,
- .setkey = ahash_setkey,
.halg.digestsize = SHA384_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha384)",
@@ -2497,7 +2410,6 @@ static struct talitos_alg_template driver_algs[] = {
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC,
- .cra_type = &crypto_ahash_type
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2506,12 +2418,6 @@ static struct talitos_alg_template driver_algs[] = {
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
- .init = ahash_init,
- .update = ahash_update,
- .final = ahash_final,
- .finup = ahash_finup,
- .digest = ahash_digest,
- .setkey = ahash_setkey,
.halg.digestsize = SHA512_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha512)",
@@ -2519,7 +2425,6 @@ static struct talitos_alg_template driver_algs[] = {
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC,
- .cra_type = &crypto_ahash_type
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2677,14 +2582,34 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
case CRYPTO_ALG_TYPE_ABLKCIPHER:
alg = &t_alg->algt.alg.crypto;
alg->cra_init = talitos_cra_init;
+ alg->cra_type = &crypto_ablkcipher_type;
+ alg->cra_ablkcipher.setkey = ablkcipher_setkey;
+ alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
+ alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
+ alg->cra_ablkcipher.geniv = "eseqiv";
break;
case CRYPTO_ALG_TYPE_AEAD:
alg = &t_alg->algt.alg.crypto;
alg->cra_init = talitos_cra_init_aead;
+ alg->cra_type = &crypto_aead_type;
+ alg->cra_aead.setkey = aead_setkey;
+ alg->cra_aead.setauthsize = aead_setauthsize;
+ alg->cra_aead.encrypt = aead_encrypt;
+ alg->cra_aead.decrypt = aead_decrypt;
+ alg->cra_aead.givencrypt = aead_givencrypt;
+ alg->cra_aead.geniv = "<built-in>";
break;
case CRYPTO_ALG_TYPE_AHASH:
alg = &t_alg->algt.alg.hash.halg.base;
alg->cra_init = talitos_cra_init_ahash;
+ alg->cra_type = &crypto_ahash_type;
+ t_alg->algt.alg.hash.init = ahash_init;
+ t_alg->algt.alg.hash.update = ahash_update;
+ t_alg->algt.alg.hash.final = ahash_final;
+ t_alg->algt.alg.hash.finup = ahash_finup;
+ t_alg->algt.alg.hash.digest = ahash_digest;
+ t_alg->algt.alg.hash.setkey = ahash_setkey;
+
if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
!strncmp(alg->cra_name, "hmac", 4)) {
kfree(t_alg);
@@ -2896,7 +2821,9 @@ static int talitos_probe(struct platform_device *ofdev)
if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
struct talitos_crypto_alg *t_alg;
char *name = NULL;
+ bool authenc = false;
+authencesn:
t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);
@@ -2911,6 +2838,8 @@ static int talitos_probe(struct platform_device *ofdev)
err = crypto_register_alg(
&t_alg->algt.alg.crypto);
name = t_alg->algt.alg.crypto.cra_driver_name;
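+			/*
+			 * First pass: note whether this is an authenc() alg
+			 * so that an "authencesn" variant can be registered
+			 * next; the second pass (via the authencesn label)
+			 * clears the flag again.
+			 */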
+ authenc = authenc ? !authenc :
+ !(bool)memcmp(name, "authenc", 7);
break;
case CRYPTO_ALG_TYPE_AHASH:
err = crypto_register_ahash(
@@ -2923,8 +2852,25 @@ static int talitos_probe(struct platform_device *ofdev)
dev_err(dev, "%s alg registration failed\n",
name);
kfree(t_alg);
- } else
+ } else {
list_add_tail(&t_alg->entry, &priv->alg_list);
+ if (authenc) {
+ struct crypto_alg *alg =
+ &driver_algs[i].alg.crypto;
+
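+				/* rewrite "authenc(...)" as "authencesn(...)" in place */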
+ name = alg->cra_name;
+ memmove(name + 10, name + 7,
+ strlen(name) - 7);
+ memcpy(name + 7, "esn", 3);
+
+ name = alg->cra_driver_name;
+ memmove(name + 10, name + 7,
+ strlen(name) - 7);
+ memcpy(name + 7, "esn", 3);
+
+ goto authencesn;
+ }
+ }
}
}
if (!list_empty(&priv->alg_list))
diff --git a/drivers/crypto/tegra-aes.c b/drivers/crypto/tegra-aes.c
index ac236f6724f4..37185e6630cd 100644
--- a/drivers/crypto/tegra-aes.c
+++ b/drivers/crypto/tegra-aes.c
@@ -969,6 +969,7 @@ static int tegra_aes_probe(struct platform_device *pdev)
aes_wq = alloc_workqueue("tegra_aes_wq", WQ_HIGHPRI | WQ_UNBOUND, 1);
if (!aes_wq) {
dev_err(dev, "alloc_workqueue failed\n");
+ err = -ENOMEM;
goto out;
}
@@ -1004,8 +1005,6 @@ static int tegra_aes_probe(struct platform_device *pdev)
aes_dev = dd;
for (i = 0; i < ARRAY_SIZE(algs); i++) {
- INIT_LIST_HEAD(&algs[i].cra_list);
-
algs[i].cra_priority = 300;
algs[i].cra_ctxsize = sizeof(struct tegra_aes_ctx);
algs[i].cra_module = THIS_MODULE;
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index ef17e3871c71..bc615cc56266 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -1486,6 +1486,7 @@ static int ux500_cryp_probe(struct platform_device *pdev)
if (!res_irq) {
dev_err(dev, "[%s]: IORESOURCE_IRQ unavailable",
__func__);
+ ret = -ENODEV;
goto out_power;
}
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index 08765072a2b3..632c3339895f 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -1991,7 +1991,6 @@ static int __init ux500_hash_mod_init(void)
static void __exit ux500_hash_mod_fini(void)
{
platform_driver_unregister(&hash_driver);
- return;
}
module_init(ux500_hash_mod_init);