author     John Stultz <john.stultz@linaro.org>      2020-11-22 02:50:02 +0300
committer  Sumit Semwal <sumit.semwal@linaro.org>    2020-11-22 19:33:04 +0300
commit     d963ab0f15fb0797ddd65b2653f3d65c2d7a08ec (patch)
tree       3756e82291a535e5110a244780b63b551f353f8a /drivers/dma-buf
parent     4c68e499bb9d6d9ec3e18fcb2f68641abb22464a (diff)
download   linux-d963ab0f15fb0797ddd65b2653f3d65c2d7a08ec.tar.xz
dma-buf: system_heap: Allocate higher order pages if available
While the system heap can return non-contiguous pages, try to allocate
larger order pages if possible. This will allow slight performance gains
and make implementing page pooling easier.

Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: Liam Mark <lmark@codeaurora.org>
Cc: Laura Abbott <labbott@kernel.org>
Cc: Brian Starkey <Brian.Starkey@arm.com>
Cc: Hridya Valsaraju <hridya@google.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Sandeep Patil <sspatil@google.com>
Cc: Daniel Mentz <danielmentz@google.com>
Cc: Chris Goldsworthy <cgoldswo@codeaurora.org>
Cc: Ørjan Eide <orjan.eide@arm.com>
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Ezequiel Garcia <ezequiel@collabora.com>
Cc: Simon Ser <contact@emersion.fr>
Cc: James Jones <jajones@nvidia.com>
Cc: linux-media@vger.kernel.org
Cc: dri-devel@lists.freedesktop.org
Reviewed-by: Brian Starkey <brian.starkey@arm.com>
Signed-off-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Sumit Semwal <sumit.semwal@linaro.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20201121235002.69945-6-john.stultz@linaro.org
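The core of the change is a best-effort, falling-back allocator: try the largest
order that still fits the remaining size, and drop to smaller orders as
allocations fail. The sketch below restates alloc_largest_available() from the
diff with explanatory comments added; the GFP rationale (masking out
__GFP_RECLAIM and adding __GFP_NORETRY for the high order so a failed large
allocation fails fast rather than stalling in reclaim or compaction) is
inferred from the flags, not spelled out in the patch itself:

static struct page *alloc_largest_available(unsigned long size,
					    unsigned int max_order)
{
	struct page *page;
	int i;

	/* orders[] = {8, 4, 0}: 1MB, 64K and 4K chunks with 4K pages */
	for (i = 0; i < NUM_ORDERS; i++) {
		/* skip chunks larger than the space still to fill */
		if (size < (PAGE_SIZE << orders[i]))
			continue;
		/* never retry an order that has already failed */
		if (max_order < orders[i])
			continue;

		/* order_flags[i] is HIGH_ORDER_GFP (no reclaim, no
		 * retry) for order 8, LOW_ORDER_GFP otherwise */
		page = alloc_pages(order_flags[i], orders[i]);
		if (!page)
			continue;	/* fall back to the next order */
		return page;
	}
	return NULL;
}

Note also that the caller ratchets max_order down to compound_order() of each
returned page, so once an order has failed it is never attempted again while
filling the same buffer.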
Diffstat (limited to 'drivers/dma-buf')
-rw-r--r--  drivers/dma-buf/heaps/system_heap.c | 89
1 file changed, 71 insertions(+), 18 deletions(-)
diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
index 32b17a5c8079..17e0e9a68baf 100644
--- a/drivers/dma-buf/heaps/system_heap.c
+++ b/drivers/dma-buf/heaps/system_heap.c
@@ -40,6 +40,20 @@ struct dma_heap_attachment {
 	bool mapped;
 };
 
+#define HIGH_ORDER_GFP  (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \
+				| __GFP_NORETRY) & ~__GFP_RECLAIM) \
+				| __GFP_COMP)
+#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO | __GFP_COMP)
+static gfp_t order_flags[] = {HIGH_ORDER_GFP, LOW_ORDER_GFP, LOW_ORDER_GFP};
+/*
+ * The selection of the orders used for allocation (1MB, 64K, 4K) is designed
+ * to match with the sizes often found in IOMMUs. Using order 4 pages instead
+ * of order 0 pages can significantly improve the performance of many IOMMUs
+ * by reducing TLB pressure and time spent updating page tables.
+ */
+static const unsigned int orders[] = {8, 4, 0};
+#define NUM_ORDERS ARRAY_SIZE(orders)
+
 static struct sg_table *dup_sg_table(struct sg_table *table)
 {
 	struct sg_table *new_table;
@@ -275,8 +289,11 @@ static void system_heap_dma_buf_release(struct dma_buf *dmabuf)
 	int i;
 
 	table = &buffer->sg_table;
-	for_each_sgtable_sg(table, sg, i)
-		__free_page(sg_page(sg));
+	for_each_sg(table->sgl, sg, table->nents, i) {
+		struct page *page = sg_page(sg);
+
+		__free_pages(page, compound_order(page));
+	}
 	sg_free_table(table);
 	kfree(buffer);
 }
@@ -294,6 +311,26 @@ static const struct dma_buf_ops system_heap_buf_ops = {
 	.release = system_heap_dma_buf_release,
 };
 
+static struct page *alloc_largest_available(unsigned long size,
+					    unsigned int max_order)
+{
+	struct page *page;
+	int i;
+
+	for (i = 0; i < NUM_ORDERS; i++) {
+		if (size < (PAGE_SIZE << orders[i]))
+			continue;
+		if (max_order < orders[i])
+			continue;
+
+		page = alloc_pages(order_flags[i], orders[i]);
+		if (!page)
+			continue;
+		return page;
+	}
+	return NULL;
+}
+
 static int system_heap_allocate(struct dma_heap *heap,
 				unsigned long len,
 				unsigned long fd_flags,
@@ -301,11 +338,13 @@ static int system_heap_allocate(struct dma_heap *heap,
 {
 	struct system_heap_buffer *buffer;
 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+	unsigned long size_remaining = len;
+	unsigned int max_order = orders[0];
 	struct dma_buf *dmabuf;
 	struct sg_table *table;
 	struct scatterlist *sg;
-	pgoff_t pagecount;
-	pgoff_t pg;
+	struct list_head pages;
+	struct page *page, *tmp_page;
 	int i, ret = -ENOMEM;
 
 	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
@@ -317,25 +356,35 @@ static int system_heap_allocate(struct dma_heap *heap,
 	buffer->heap = heap;
 	buffer->len = len;
 
-	table = &buffer->sg_table;
-	pagecount = len / PAGE_SIZE;
-	if (sg_alloc_table(table, pagecount, GFP_KERNEL))
-		goto free_buffer;
-
-	sg = table->sgl;
-	for (pg = 0; pg < pagecount; pg++) {
-		struct page *page;
+	INIT_LIST_HEAD(&pages);
+	i = 0;
+	while (size_remaining > 0) {
 		/*
 		 * Avoid trying to allocate memory if the process
 		 * has been killed by SIGKILL
 		 */
 		if (fatal_signal_pending(current))
-			goto free_pages;
-		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+			goto free_buffer;
+
+		page = alloc_largest_available(size_remaining, max_order);
 		if (!page)
-			goto free_pages;
+			goto free_buffer;
+
+		list_add_tail(&page->lru, &pages);
+		size_remaining -= page_size(page);
+		max_order = compound_order(page);
+		i++;
+	}
+
+	table = &buffer->sg_table;
+	if (sg_alloc_table(table, i, GFP_KERNEL))
+		goto free_buffer;
+
+	sg = table->sgl;
+	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
 		sg_set_page(sg, page, page_size(page), 0);
 		sg = sg_next(sg);
+		list_del(&page->lru);
 	}
 
 	/* create the dmabuf */
@@ -355,14 +404,18 @@ static int system_heap_allocate(struct dma_heap *heap,
 		/* just return, as put will call release and that will free */
 		return ret;
 	}
-
 	return ret;
 
 free_pages:
-	for_each_sgtable_sg(table, sg, i)
-		__free_page(sg_page(sg));
+	for_each_sgtable_sg(table, sg, i) {
+		struct page *p = sg_page(sg);
+
+		__free_pages(p, compound_order(p));
+	}
 	sg_free_table(table);
 free_buffer:
+	list_for_each_entry_safe(page, tmp_page, &pages, lru)
+		__free_pages(page, compound_order(page));
 	kfree(buffer);
 
 	return ret;
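One detail the diff relies on without comment: every chunk is allocated with
__GFP_COMP, so both teardown paths can recover each chunk's order from the page
itself via compound_order() instead of tracking per-entry sizes
(compound_order() returns 0 for an order-0 page, so the 4K fallback needs no
special casing). A minimal sketch of that shared pattern; free_chunks() is a
hypothetical helper name, not anything in the patch:

/* Hypothetical helper illustrating the teardown pattern used by both
 * system_heap_dma_buf_release() and the free_pages error path above. */
static void free_chunks(struct sg_table *table)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = sg_page(sg);

		/* the order is recoverable because the page is compound */
		__free_pages(page, compound_order(page));
	}
	sg_free_table(table);
}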
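The change is transparent to userspace: a buffer allocated from
/dev/dma_heap/system simply ends up backed by fewer, larger scatterlist
entries. A sketch of exercising it through the standard dma-heap uapi,
assuming a kernel with the system heap enabled and <linux/dma-heap.h>
installed; the 4MB size is an arbitrary choice, picked so the order-8 path is
actually taken:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/dma-heap.h>

int main(void)
{
	struct dma_heap_allocation_data data = {
		.len = 4 * 1024 * 1024,		/* large enough for 1MB chunks */
		.fd_flags = O_RDWR | O_CLOEXEC,
	};
	int heap_fd = open("/dev/dma_heap/system", O_RDONLY | O_CLOEXEC);

	if (heap_fd < 0 || ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data) < 0) {
		perror("dma-heap alloc");
		return 1;
	}

	/* data.fd is now a dma-buf fd backed by the (possibly
	 * higher-order) pages allocated by the heap */
	void *p = mmap(NULL, data.len, PROT_READ | PROT_WRITE,
		       MAP_SHARED, data.fd, 0);
	if (p != MAP_FAILED) {
		memset(p, 0, data.len);
		munmap(p, data.len);
	}
	close(data.fd);
	close(heap_fd);
	return 0;
}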