Diffstat (limited to 'drivers/md/dm-crypt.c')
-rw-r--r--  drivers/md/dm-crypt.c  69
1 files changed, 48 insertions, 21 deletions
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 8b47b913ee83..1dc6227d353e 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -31,10 +31,10 @@
 #include <asm/unaligned.h>
 #include <crypto/hash.h>
 #include <crypto/md5.h>
-#include <crypto/algapi.h>
 #include <crypto/skcipher.h>
 #include <crypto/aead.h>
 #include <crypto/authenc.h>
+#include <crypto/utils.h>
 #include <linux/rtnetlink.h> /* for struct rtattr and RTA macros only */
 #include <linux/key-type.h>
 #include <keys/user-type.h>
@@ -745,16 +745,23 @@ static int crypt_iv_eboiv_ctr(struct crypt_config *cc, struct dm_target *ti,
 static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
 			      struct dm_crypt_request *dmreq)
 {
-	u8 buf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(__le64));
+	struct crypto_skcipher *tfm = any_tfm(cc);
 	struct skcipher_request *req;
 	struct scatterlist src, dst;
 	DECLARE_CRYPTO_WAIT(wait);
+	unsigned int reqsize;
 	int err;
+	u8 *buf;
 
-	req = skcipher_request_alloc(any_tfm(cc), GFP_NOIO);
+	reqsize = ALIGN(crypto_skcipher_reqsize(tfm), __alignof__(__le64));
+
+	req = kmalloc(reqsize + cc->iv_size, GFP_NOIO);
 	if (!req)
 		return -ENOMEM;
 
+	skcipher_request_set_tfm(req, tfm);
+
+	buf = (u8 *)req + reqsize;
 	memset(buf, 0, cc->iv_size);
 	*(__le64 *)buf = cpu_to_le64(dmreq->iv_sector * cc->sector_size);
 
@@ -763,7 +770,7 @@ static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
 	skcipher_request_set_crypt(req, &src, &dst, cc->iv_size, buf);
 	skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
 	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
-	skcipher_request_free(req);
+	kfree_sensitive(req);
 
 	return err;
 }
@@ -1661,6 +1668,9 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
  * In order to not degrade performance with excessive locking, we try
  * non-blocking allocations without a mutex first but on failure we fallback
  * to blocking allocations with a mutex.
+ *
+ * In order to reduce allocation overhead, we try to allocate compound pages in
+ * the first pass. If they are not available, we fall back to the mempool.
  */
 static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned int size)
 {
@@ -1668,8 +1678,8 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned int size)
 	struct bio *clone;
 	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
-	unsigned int i, len, remaining_size;
-	struct page *page;
+	unsigned int remaining_size;
+	unsigned int order = MAX_ORDER - 1;
 
 retry:
 	if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
@@ -1682,20 +1692,34 @@ retry:
 
 	remaining_size = size;
 
-	for (i = 0; i < nr_iovecs; i++) {
-		page = mempool_alloc(&cc->page_pool, gfp_mask);
-		if (!page) {
+	while (remaining_size) {
+		struct page *pages;
+		unsigned size_to_add;
+		unsigned remaining_order = __fls((remaining_size + PAGE_SIZE - 1) >> PAGE_SHIFT);
+		order = min(order, remaining_order);
+
+		while (order > 0) {
+			pages = alloc_pages(gfp_mask
+				| __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | __GFP_COMP,
+				order);
+			if (likely(pages != NULL))
+				goto have_pages;
+			order--;
+		}
+
+		pages = mempool_alloc(&cc->page_pool, gfp_mask);
+		if (!pages) {
 			crypt_free_buffer_pages(cc, clone);
 			bio_put(clone);
 			gfp_mask |= __GFP_DIRECT_RECLAIM;
+			order = 0;
 			goto retry;
 		}
 
-		len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size;
-
-		bio_add_page(clone, page, len, 0);
-
-		remaining_size -= len;
+have_pages:
+		size_to_add = min((unsigned)PAGE_SIZE << order, remaining_size);
+		__bio_add_page(clone, pages, size_to_add, 0);
+		remaining_size -= size_to_add;
 	}
 
 	/* Allocate space for integrity tags */
@@ -1713,12 +1737,15 @@ retry:
 
 static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
 {
-	struct bio_vec *bv;
-	struct bvec_iter_all iter_all;
-
-	bio_for_each_segment_all(bv, clone, iter_all) {
-		BUG_ON(!bv->bv_page);
-		mempool_free(bv->bv_page, &cc->page_pool);
+	struct folio_iter fi;
+
+	if (clone->bi_vcnt > 0) { /* bio_for_each_folio_all crashes with an empty bio */
+		bio_for_each_folio_all(fi, clone) {
+			if (folio_test_large(fi.folio))
+				folio_put(fi.folio);
+			else
+				mempool_free(&fi.folio->page, &cc->page_pool);
+		}
 	}
 }
 
@@ -2888,7 +2915,7 @@ static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key
 		ret = crypt_ctr_auth_cipher(cc, cipher_api);
 		if (ret < 0) {
 			ti->error = "Invalid AEAD cipher spec";
-			return -ENOMEM;
+			return ret;
 		}
 	}
 
@@ -3256,7 +3283,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
 	cc->per_bio_data_size = ti->per_io_data_size =
 		ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + additional_req_size,
-		      ARCH_KMALLOC_MINALIGN);
+		      ARCH_DMA_MINALIGN);
 
 	ret = mempool_init(&cc->page_pool, BIO_MAX_VECS, crypt_page_alloc, crypt_page_free, cc);
 	if (ret) {
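
The crypt_iv_eboiv_gen() hunks replace the fixed on-stack buffer (u8 buf[MAX_CIPHER_BLOCKSIZE]) with a single heap allocation that carries both the skcipher request and the IV scratch space, freed with kfree_sensitive() so the sector-derived material is zeroed before the memory is returned. A rough sketch of the resulting layout, using only names that appear in the hunks above:

/*
 * One kmalloc(reqsize + cc->iv_size, GFP_NOIO) serves both purposes:
 *
 *   req                             buf = (u8 *)req + reqsize
 *   |                               |
 *   v                               v
 *   +-------------------------------+----------------------+
 *   | struct skcipher_request plus  | IV scratch buffer    |
 *   | the tfm's request context     | (cc->iv_size bytes)  |
 *   +-------------------------------+----------------------+
 *
 * reqsize = ALIGN(crypto_skcipher_reqsize(tfm), __alignof__(__le64))
 * keeps buf naturally aligned for the *(__le64 *)buf sector store.
 */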
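
In crypt_alloc_buffer(), the new loop computes the largest page order that remaining_size still covers, tries alloc_pages() at falling orders, and only falls back to the order-0 mempool (forcing order = 0 for the blocking retry). Below is a minimal userspace sketch of just the order arithmetic; PAGE_SHIFT, MAX_ORDER and fls_nonzero() are stand-ins for the kernel's PAGE_SHIFT, MAX_ORDER and __fls(), and every allocation attempt is assumed to succeed:

/* Userspace sketch of the order selection in crypt_alloc_buffer(). */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1u << PAGE_SHIFT)
#define MAX_ORDER  10

/* Position of the highest set bit, like the kernel's __fls(). */
static unsigned int fls_nonzero(unsigned int x)
{
	unsigned int r = 0;

	while (x >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int remaining_size = 253 * PAGE_SIZE;	/* arbitrary I/O size */
	unsigned int order = MAX_ORDER - 1;

	while (remaining_size) {
		unsigned int nr_pages = (remaining_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned int size_to_add;

		/* Never pick an order larger than what is still needed. */
		if (fls_nonzero(nr_pages) < order)
			order = fls_nonzero(nr_pages);

		size_to_add = PAGE_SIZE << order;
		if (size_to_add > remaining_size)
			size_to_add = remaining_size;

		printf("order %2u: add %8u bytes, %8u left\n",
		       order, size_to_add, remaining_size - size_to_add);
		remaining_size -= size_to_add;
	}
	return 0;
}

For a 253-page bio this walks down through orders 7, 6, 5, 4, 3, 2 and 0: seven allocations instead of 253 single pages, which is the overhead reduction the new comment block in the patch describes.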
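
The rewritten crypt_free_buffer_pages() has to tell the two page sources apart at free time. Pages from alloc_pages() were allocated with __GFP_COMP, so each shows up as a large folio and carries its own reference count; order-0 pages belong to the mempool and must be returned there. The bi_vcnt guard exists because, per the comment in the hunk, bio_for_each_folio_all() cannot iterate an empty bio, which the retry path can produce when it tears down a clone before any page was added. The per-segment decision, annotated (same code as the hunk, comments added here):

if (folio_test_large(fi.folio))
	/* compound page from alloc_pages(..., order); one folio_put()
	 * drops all of its pages at once */
	folio_put(fi.folio);
else
	/* order-0 page from mempool_alloc(); return it to cc->page_pool
	 * instead of freeing it to the page allocator */
	mempool_free(&fi.folio->page, &cc->page_pool);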