author    Milan Broz <mbroz@redhat.com>  2008-02-08 05:11:07 +0300
committer Alasdair G Kergon <agk@redhat.com>  2008-02-08 05:11:07 +0300
commit    ddd42edfd8ec44595b1501318512bc29a36f015f (patch)
tree      c102f6dd5ffad033531352ef53508db4bbefb382 /drivers/md/dm-crypt.c
parent    01482b7671d014aa44f2efbc1153f4e3f48d7fb3 (diff)
download  linux-ddd42edfd8ec44595b1501318512bc29a36f015f.tar.xz
dm crypt: add async request mempool
dm-crypt: Use crypto ablkcipher interface

Introduce mempool for async crypto requests. cc->req is used mainly during
synchronous operations (to prevent allocation and deallocation of the same
object).

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Milan Broz <mbroz@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
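In other words, every element of the new request mempool is a single allocation
carrying the ablkcipher request, its padding, the driver's own dm_crypt_request
bookkeeping and the IV. Below is a minimal sketch of how such an element is sized
and then handed out lazily, mirroring what the patch does in crypt_ctr() and
crypt_alloc_req(); it assumes the dm-crypt.c context (struct crypt_config, MIN_IOS),
and the "example_" helper names are illustrative, not part of the patch.

	/* Sketch only: size one element of the request pool and hand it out
	 * lazily.  Helper names prefixed "example_" are made up. */
	static int example_create_req_pool(struct crypt_config *cc)
	{
		/* The dm_crypt_request must start at an offset that satisfies
		 * the crypto API's context alignment. */
		cc->dmreq_start = ALIGN(sizeof(struct ablkcipher_request),
					crypto_tfm_ctx_alignment());

		/* One element = request header + padding + dm_crypt_request + IV */
		cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS,
				cc->dmreq_start +
				sizeof(struct dm_crypt_request) +
				cc->iv_size);
		if (!cc->req_pool)
			return -ENOMEM;

		cc->req = NULL;	/* allocated on first use, then reused */
		return 0;
	}

	static void example_get_req(struct crypt_config *cc)
	{
		/* GFP_NOIO may sleep but must not recurse into the I/O path;
		 * the mempool guarantees forward progress under memory pressure. */
		if (!cc->req)
			cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
	}

Because cc->req is kept across synchronous operations instead of being freed after
each block, the common path avoids repeated allocation and deallocation of the same
object, which is exactly what the commit message describes.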
Diffstat (limited to 'drivers/md/dm-crypt.c')
-rw-r--r--	drivers/md/dm-crypt.c	45
1 files changed, 43 insertions, 2 deletions
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 2da9b9536afb..79316580c780 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -80,10 +80,11 @@ struct crypt_config {
sector_t start;
/*
- * pool for per bio private data and
- * for encryption buffer pages
+ * pool for per bio private data, crypto requests and
+ * encryption requests/buffer pages
*/
mempool_t *io_pool;
+ mempool_t *req_pool;
mempool_t *page_pool;
struct bio_set *bs;
@@ -101,6 +102,22 @@ struct crypt_config {
sector_t iv_offset;
unsigned int iv_size;
+ /*
+ * Layout of each crypto request:
+ *
+ * struct ablkcipher_request
+ * context
+ * padding
+ * struct dm_crypt_request
+ * padding
+ * IV
+ *
+ * The padding is added so that dm_crypt_request and the IV are
+ * correctly aligned.
+ */
+ unsigned int dmreq_start;
+ struct ablkcipher_request *req;
+
char cipher[CRYPTO_MAX_ALG_NAME];
char chainmode[CRYPTO_MAX_ALG_NAME];
struct crypto_blkcipher *tfm;
@@ -377,6 +394,13 @@ static int crypt_convert_block(struct crypt_config *cc,
ctx->sector);
}
+static void crypt_alloc_req(struct crypt_config *cc,
+ struct convert_context *ctx)
+{
+ if (!cc->req)
+ cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
+}
+
/*
* Encrypt / decrypt data from one bio to another one (can be the same one)
*/
@@ -882,6 +906,17 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad_slab_pool;
}
+ cc->dmreq_start = sizeof(struct ablkcipher_request);
+ cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
+
+ cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
+ sizeof(struct dm_crypt_request) + cc->iv_size);
+ if (!cc->req_pool) {
+ ti->error = "Cannot allocate crypt request mempool";
+ goto bad_req_pool;
+ }
+ cc->req = NULL;
+
cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
if (!cc->page_pool) {
ti->error = "Cannot allocate page mempool";
@@ -955,6 +990,8 @@ bad_device:
bad_bs:
mempool_destroy(cc->page_pool);
bad_page_pool:
+ mempool_destroy(cc->req_pool);
+bad_req_pool:
mempool_destroy(cc->io_pool);
bad_slab_pool:
if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
@@ -975,8 +1012,12 @@ static void crypt_dtr(struct dm_target *ti)
destroy_workqueue(cc->io_queue);
destroy_workqueue(cc->crypt_queue);
+ if (cc->req)
+ mempool_free(cc->req, cc->req_pool);
+
bioset_free(cc->bs);
mempool_destroy(cc->page_pool);
+ mempool_destroy(cc->req_pool);
mempool_destroy(cc->io_pool);
kfree(cc->iv_mode);
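Given the element layout described in the struct comment above (request, context,
padding, dm_crypt_request, padding, IV) and the element size used in crypt_ctr()
(cc->dmreq_start + sizeof(struct dm_crypt_request) + cc->iv_size), the per-request
data and the IV can be recovered from an allocated request with simple pointer
arithmetic. A hedged sketch follows; the helper names are illustrative and not part
of this patch.

	/* Sketch only: locate the dm_crypt_request and IV inside one element
	 * of cc->req_pool.  Helper names are illustrative. */
	static struct dm_crypt_request *example_dmreq_of_req(struct crypt_config *cc,
							     struct ablkcipher_request *req)
	{
		/* the dm_crypt_request lives cc->dmreq_start bytes into the element */
		return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
	}

	static u8 *example_iv_of_dmreq(struct crypt_config *cc,
				       struct dm_crypt_request *dmreq)
	{
		/* the IV occupies the cc->iv_size bytes that follow it */
		return (u8 *)(dmreq + 1);
	}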