Diffstat (limited to 'drivers/crypto/vmx/aes_cbc.c')
-rw-r--r--  drivers/crypto/vmx/aes_cbc.c  183
1 file changed, 67 insertions(+), 116 deletions(-)
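In short, the patch below converts the Power8 VMX AES-CBC driver from the legacy blkcipher interface to the skcipher API. The duplicated encrypt and decrypt walk loops collapse into a single p8_aes_cbc_crypt() helper that picks enc_key or dec_key, and the on-stack sync-skcipher fallback is replaced by a sub-request carried in the request context: crypto_skcipher_set_reqsize() in the init hook reserves room for a full struct skcipher_request plus the fallback's own reqsize, so skcipher_request_ctx() hands back a ready-made sub-request whenever SIMD is unusable. The crypto_alg definition becomes a struct skcipher_alg accordingly.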
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
index c7e515a1bc97..d88084447f1c 100644
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -7,64 +7,52 @@
* Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com>
*/
-#include <linux/types.h>
-#include <linux/err.h>
-#include <linux/crypto.h>
-#include <linux/delay.h>
#include <asm/simd.h>
#include <asm/switch_to.h>
#include <crypto/aes.h>
#include <crypto/internal/simd.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/skcipher.h>
+#include <crypto/internal/skcipher.h>
#include "aesp8-ppc.h"
struct p8_aes_cbc_ctx {
- struct crypto_sync_skcipher *fallback;
+ struct crypto_skcipher *fallback;
struct aes_key enc_key;
struct aes_key dec_key;
};
-static int p8_aes_cbc_init(struct crypto_tfm *tfm)
+static int p8_aes_cbc_init(struct crypto_skcipher *tfm)
{
- const char *alg = crypto_tfm_alg_name(tfm);
- struct crypto_sync_skcipher *fallback;
- struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
-
- fallback = crypto_alloc_sync_skcipher(alg, 0,
- CRYPTO_ALG_NEED_FALLBACK);
+ struct p8_aes_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct crypto_skcipher *fallback;
+ fallback = crypto_alloc_skcipher("cbc(aes)", 0,
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC);
if (IS_ERR(fallback)) {
- printk(KERN_ERR
- "Failed to allocate transformation for '%s': %ld\n",
- alg, PTR_ERR(fallback));
+ pr_err("Failed to allocate cbc(aes) fallback: %ld\n",
+ PTR_ERR(fallback));
return PTR_ERR(fallback);
}
- crypto_sync_skcipher_set_flags(
- fallback,
- crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
+ crypto_skcipher_reqsize(fallback));
ctx->fallback = fallback;
-
return 0;
}
-static void p8_aes_cbc_exit(struct crypto_tfm *tfm)
+static void p8_aes_cbc_exit(struct crypto_skcipher *tfm)
{
- struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct p8_aes_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
- if (ctx->fallback) {
- crypto_free_sync_skcipher(ctx->fallback);
- ctx->fallback = NULL;
- }
+ crypto_free_skcipher(ctx->fallback);
}
-static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
+static int p8_aes_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
+ struct p8_aes_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
int ret;
- struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
preempt_disable();
pagefault_disable();
@@ -75,108 +63,71 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_enable();
preempt_enable();
- ret |= crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
+ ret |= crypto_skcipher_setkey(ctx->fallback, key, keylen);
return ret ? -EINVAL : 0;
}
-static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int p8_aes_cbc_crypt(struct skcipher_request *req, int enc)
{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct p8_aes_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int ret;
- struct blkcipher_walk walk;
- struct p8_aes_cbc_ctx *ctx =
- crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
if (!crypto_simd_usable()) {
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
- skcipher_request_set_sync_tfm(req, ctx->fallback);
- skcipher_request_set_callback(req, desc->flags, NULL, NULL);
- skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
- ret = crypto_skcipher_encrypt(req);
- skcipher_request_zero(req);
- } else {
- blkcipher_walk_init(&walk, dst, src, nbytes);
- ret = blkcipher_walk_virt(desc, &walk);
- while ((nbytes = walk.nbytes)) {
- preempt_disable();
- pagefault_disable();
- enable_kernel_vsx();
- aes_p8_cbc_encrypt(walk.src.virt.addr,
- walk.dst.virt.addr,
- nbytes & AES_BLOCK_MASK,
- &ctx->enc_key, walk.iv, 1);
- disable_kernel_vsx();
- pagefault_enable();
- preempt_enable();
-
- nbytes &= AES_BLOCK_SIZE - 1;
- ret = blkcipher_walk_done(desc, &walk, nbytes);
- }
+ struct skcipher_request *subreq = skcipher_request_ctx(req);
+
+ *subreq = *req;
+ skcipher_request_set_tfm(subreq, ctx->fallback);
+ return enc ? crypto_skcipher_encrypt(subreq) :
+ crypto_skcipher_decrypt(subreq);
}
+ ret = skcipher_walk_virt(&walk, req, false);
+ while ((nbytes = walk.nbytes) != 0) {
+ preempt_disable();
+ pagefault_disable();
+ enable_kernel_vsx();
+ aes_p8_cbc_encrypt(walk.src.virt.addr,
+ walk.dst.virt.addr,
+ round_down(nbytes, AES_BLOCK_SIZE),
+ enc ? &ctx->enc_key : &ctx->dec_key,
+ walk.iv, enc);
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+
+ ret = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE);
+ }
return ret;
}
-static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int p8_aes_cbc_encrypt(struct skcipher_request *req)
{
- int ret;
- struct blkcipher_walk walk;
- struct p8_aes_cbc_ctx *ctx =
- crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
-
- if (!crypto_simd_usable()) {
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
- skcipher_request_set_sync_tfm(req, ctx->fallback);
- skcipher_request_set_callback(req, desc->flags, NULL, NULL);
- skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
- ret = crypto_skcipher_decrypt(req);
- skcipher_request_zero(req);
- } else {
- blkcipher_walk_init(&walk, dst, src, nbytes);
- ret = blkcipher_walk_virt(desc, &walk);
- while ((nbytes = walk.nbytes)) {
- preempt_disable();
- pagefault_disable();
- enable_kernel_vsx();
- aes_p8_cbc_encrypt(walk.src.virt.addr,
- walk.dst.virt.addr,
- nbytes & AES_BLOCK_MASK,
- &ctx->dec_key, walk.iv, 0);
- disable_kernel_vsx();
- pagefault_enable();
- preempt_enable();
-
- nbytes &= AES_BLOCK_SIZE - 1;
- ret = blkcipher_walk_done(desc, &walk, nbytes);
- }
- }
-
- return ret;
+ return p8_aes_cbc_crypt(req, 1);
}
+static int p8_aes_cbc_decrypt(struct skcipher_request *req)
+{
+ return p8_aes_cbc_crypt(req, 0);
+}
-struct crypto_alg p8_aes_cbc_alg = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "p8_aes_cbc",
- .cra_module = THIS_MODULE,
- .cra_priority = 2000,
- .cra_type = &crypto_blkcipher_type,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
- .cra_alignmask = 0,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct p8_aes_cbc_ctx),
- .cra_init = p8_aes_cbc_init,
- .cra_exit = p8_aes_cbc_exit,
- .cra_blkcipher = {
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = p8_aes_cbc_setkey,
- .encrypt = p8_aes_cbc_encrypt,
- .decrypt = p8_aes_cbc_decrypt,
- },
+struct skcipher_alg p8_aes_cbc_alg = {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "p8_aes_cbc",
+ .base.cra_module = THIS_MODULE,
+ .base.cra_priority = 2000,
+ .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct p8_aes_cbc_ctx),
+ .setkey = p8_aes_cbc_setkey,
+ .encrypt = p8_aes_cbc_encrypt,
+ .decrypt = p8_aes_cbc_decrypt,
+ .init = p8_aes_cbc_init,
+ .exit = p8_aes_cbc_exit,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
};
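For completeness, here is a minimal sketch of how the converted struct skcipher_alg would be registered. In the real driver the registration lives in the module glue (vmx.c) alongside the driver's other algorithms; the p8_aes_cbc_demo_* names below are hypothetical, and only crypto_register_skcipher()/crypto_unregister_skcipher() from <crypto/internal/skcipher.h> are the actual kernel API:

/*
 * Hypothetical standalone module glue for the converted algorithm.
 * The real driver registers this alg from vmx.c together with its
 * other VMX algorithms; this sketch only illustrates the skcipher
 * registration calls that replace crypto_register_alg().
 */
#include <linux/module.h>
#include <crypto/internal/skcipher.h>

extern struct skcipher_alg p8_aes_cbc_alg;

static int __init p8_aes_cbc_demo_init(void)
{
	/* Makes "cbc(aes)" available at cra_priority 2000. */
	return crypto_register_skcipher(&p8_aes_cbc_alg);
}

static void __exit p8_aes_cbc_demo_exit(void)
{
	crypto_unregister_skcipher(&p8_aes_cbc_alg);
}

module_init(p8_aes_cbc_demo_init);
module_exit(p8_aes_cbc_demo_exit);
MODULE_LICENSE("GPL");

Note that p8_aes_cbc_alg is registered without CRYPTO_ALG_ASYNC, so it is a synchronous skcipher; correspondingly, p8_aes_cbc_init() passes CRYPTO_ALG_ASYNC in the mask when allocating the fallback, which restricts the crypto API to synchronous implementations and guarantees the inline crypto_skcipher_encrypt()/crypto_skcipher_decrypt() call on the sub-request completes before p8_aes_cbc_crypt() returns.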