author:    Christian König <christian.koenig@amd.com>  2020-10-21 15:06:49 +0300
committer: Christian König <christian.koenig@amd.com>  2020-10-26 16:45:42 +0300
commit:    e34b8feeaa4b65725b25f49c9b08a0f8707e8e86 (patch)
tree:      061cbee6c5cedd9509877eee5e6caab554bbe9c3 /drivers/gpu/drm/ttm
parent:    230c079fdcf45efacd316a76c3132b9f42cd3565 (diff)
drm/ttm: merge ttm_dma_tt back into ttm_tt
It makes no difference to kmalloc if the structure is 48 or 64 bytes
in size.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Dave Airlie <airlied@redhat.com>
Link: https://patchwork.freedesktop.org/patch/396950/
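The patch removes the struct ttm_dma_tt wrapper entirely and moves its two
members into struct ttm_tt. A minimal sketch of the merged layout,
reconstructed from the hunks below (field order, and any members this patch
does not touch, are assumptions rather than a copy of the real header):

/* Sketch of the merged struct ttm_tt, reconstructed from this diff.
 * Field order and untouched members are assumptions. */
struct ttm_tt {
	struct page **pages;		/* page directory */
	uint32_t page_flags;
	unsigned long num_pages;
	struct sg_table *sg;
	dma_addr_t *dma_address;	/* was ttm_dma_tt.dma_address */
	struct file *swap_storage;
	enum ttm_caching caching;
	struct list_head pages_list;	/* was ttm_dma_tt.pages_list */
};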
Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c             |  2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c     | 30
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 44
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c             | 55
4 files changed, 58 insertions(+), 73 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 5b411252a857..40c72a0f9325 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1192,7 +1192,7 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
size += ttm_round_pot(struct_size);
size += ttm_round_pot(npages * (2*sizeof(void *) + sizeof(dma_addr_t)));
- size += ttm_round_pot(sizeof(struct ttm_dma_tt));
+ size += ttm_round_pot(sizeof(struct ttm_tt));
return size;
}
EXPORT_SYMBOL(ttm_bo_dma_acc_size);
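The accounting change above works because ttm_round_pot() (and kmalloc
itself) rounds small allocations up to power-of-two buckets, so a 48-byte
and a 64-byte structure land in the same 64-byte bucket; that is exactly
the commit message's point. A userspace sketch of that rounding
(round_pot() is a simplified stand-in for TTM's ttm_round_pot() on
sub-page sizes):

#include <stdio.h>

/* Simplified stand-in for ttm_round_pot() on sub-page sizes: round
 * up to the next power of two, the way slab kmalloc buckets do. */
static unsigned long round_pot(unsigned long size)
{
	unsigned long pot = 4;

	while (pot < size)
		pot <<= 1;
	return pot;
}

int main(void)
{
	printf("48 bytes -> %lu-byte bucket\n", round_pot(48)); /* 64 */
	printf("64 bytes -> %lu-byte bucket\n", round_pot(64)); /* 64 */
	return 0;
}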
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 661b75d19cad..29e6c29ad60e 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -1081,28 +1081,28 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm)
}
EXPORT_SYMBOL(ttm_pool_unpopulate);
-int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt,
+int ttm_populate_and_map_pages(struct device *dev, struct ttm_tt *tt,
struct ttm_operation_ctx *ctx)
{
unsigned i, j;
int r;
- r = ttm_pool_populate(&tt->ttm, ctx);
+ r = ttm_pool_populate(tt, ctx);
if (r)
return r;
- for (i = 0; i < tt->ttm.num_pages; ++i) {
- struct page *p = tt->ttm.pages[i];
+ for (i = 0; i < tt->num_pages; ++i) {
+ struct page *p = tt->pages[i];
size_t num_pages = 1;
- for (j = i + 1; j < tt->ttm.num_pages; ++j) {
- if (++p != tt->ttm.pages[j])
+ for (j = i + 1; j < tt->num_pages; ++j) {
+ if (++p != tt->pages[j])
break;
++num_pages;
}
- tt->dma_address[i] = dma_map_page(dev, tt->ttm.pages[i],
+ tt->dma_address[i] = dma_map_page(dev, tt->pages[i],
0, num_pages * PAGE_SIZE,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, tt->dma_address[i])) {
@@ -1111,7 +1111,7 @@ int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt,
PAGE_SIZE, DMA_BIDIRECTIONAL);
tt->dma_address[i] = 0;
}
- ttm_pool_unpopulate(&tt->ttm);
+ ttm_pool_unpopulate(tt);
return -EFAULT;
}
@@ -1124,21 +1124,21 @@ int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt,
}
EXPORT_SYMBOL(ttm_populate_and_map_pages);
-void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt)
+void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_tt *tt)
{
unsigned i, j;
- for (i = 0; i < tt->ttm.num_pages;) {
- struct page *p = tt->ttm.pages[i];
+ for (i = 0; i < tt->num_pages;) {
+ struct page *p = tt->pages[i];
size_t num_pages = 1;
- if (!tt->dma_address[i] || !tt->ttm.pages[i]) {
+ if (!tt->dma_address[i] || !tt->pages[i]) {
++i;
continue;
}
- for (j = i + 1; j < tt->ttm.num_pages; ++j) {
- if (++p != tt->ttm.pages[j])
+ for (j = i + 1; j < tt->num_pages; ++j) {
+ if (++p != tt->pages[j])
break;
++num_pages;
@@ -1149,7 +1149,7 @@ void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt)
i += num_pages;
}
- ttm_pool_unpopulate(&tt->ttm);
+ ttm_pool_unpopulate(tt);
}
EXPORT_SYMBOL(ttm_unmap_and_unpopulate_pages);
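Both helpers above walk the page array looking for physically contiguous
runs, so a single dma_map_page()/dma_unmap_page() call can cover several
pages. A standalone userspace sketch of the same run detection (plain C;
page addresses stand in for the struct page pointers the kernel code
compares):

#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Count how many consecutive array entries, starting at i, describe
 * physically adjacent pages; mirrors the j-loop in
 * ttm_populate_and_map_pages() above. */
static size_t run_length(const unsigned long *pages, size_t n, size_t i)
{
	size_t num_pages = 1;
	size_t j;

	for (j = i + 1; j < n; ++j) {
		if (pages[j] != pages[j - 1] + PAGE_SIZE)
			break;
		++num_pages;
	}
	return num_pages;
}

int main(void)
{
	/* two runs: three adjacent pages, then one isolated page */
	unsigned long pages[] = { 0x1000, 0x2000, 0x3000, 0x8000 };
	size_t n = sizeof(pages) / sizeof(pages[0]);
	size_t i = 0;

	while (i < n) {
		size_t run = run_length(pages, n, i);

		printf("map %zu page(s) starting at %#lx\n", run, pages[i]);
		i += run;
	}
	return 0;
}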
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index a9aaed7e618a..c0353c25efd6 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -832,11 +832,10 @@ static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
* return dma_page pointer if success, otherwise NULL.
*/
static struct dma_page *ttm_dma_pool_get_pages(struct dma_pool *pool,
- struct ttm_dma_tt *ttm_dma,
+ struct ttm_tt *ttm,
unsigned index)
{
struct dma_page *d_page = NULL;
- struct ttm_tt *ttm = &ttm_dma->ttm;
unsigned long irq_flags;
int count;
@@ -845,8 +844,8 @@ static struct dma_page *ttm_dma_pool_get_pages(struct dma_pool *pool,
if (count) {
d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
ttm->pages[index] = d_page->p;
- ttm_dma->dma_address[index] = d_page->dma;
- list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
+ ttm->dma_address[index] = d_page->dma;
+ list_move_tail(&d_page->page_list, &ttm->pages_list);
pool->npages_in_use += 1;
pool->npages_free -= 1;
}
@@ -854,9 +853,8 @@ static struct dma_page *ttm_dma_pool_get_pages(struct dma_pool *pool,
return d_page;
}
-static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
+static gfp_t ttm_dma_pool_gfp_flags(struct ttm_tt *ttm, bool huge)
{
- struct ttm_tt *ttm = &ttm_dma->ttm;
gfp_t gfp_flags;
if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
@@ -883,11 +881,10 @@ static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
* On success pages list will hold count number of correctly
* cached pages. On failure will hold the negative return value (-ENOMEM, etc).
*/
-int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
+int ttm_dma_populate(struct ttm_tt *ttm, struct device *dev,
struct ttm_operation_ctx *ctx)
{
struct ttm_mem_global *mem_glob = &ttm_mem_glob;
- struct ttm_tt *ttm = &ttm_dma->ttm;
unsigned long num_pages = ttm->num_pages;
struct dma_pool *pool;
struct dma_page *d_page;
@@ -901,7 +898,7 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
if (ttm_check_under_lowerlimit(mem_glob, num_pages, ctx))
return -ENOMEM;
- INIT_LIST_HEAD(&ttm_dma->pages_list);
+ INIT_LIST_HEAD(&ttm->pages_list);
i = 0;
type = ttm_to_type(ttm->page_flags, ttm->caching);
@@ -912,7 +909,7 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
pool = ttm_dma_find_pool(dev, type | IS_HUGE);
if (!pool) {
- gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, true);
+ gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm, true);
pool = ttm_dma_pool_init(dev, gfp_flags, type | IS_HUGE);
if (IS_ERR_OR_NULL(pool))
@@ -922,21 +919,21 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
while (num_pages >= HPAGE_PMD_NR) {
unsigned j;
- d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
+ d_page = ttm_dma_pool_get_pages(pool, ttm, i);
if (!d_page)
break;
ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
pool->size, ctx);
if (unlikely(ret != 0)) {
- ttm_dma_unpopulate(ttm_dma, dev);
+ ttm_dma_unpopulate(ttm, dev);
return -ENOMEM;
}
d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
for (j = i + 1; j < (i + HPAGE_PMD_NR); ++j) {
ttm->pages[j] = ttm->pages[j - 1] + 1;
- ttm_dma->dma_address[j] = ttm_dma->dma_address[j - 1] +
+ ttm->dma_address[j] = ttm->dma_address[j - 1] +
PAGE_SIZE;
}
@@ -949,7 +946,7 @@ skip_huge:
pool = ttm_dma_find_pool(dev, type);
if (!pool) {
- gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, false);
+ gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm, false);
pool = ttm_dma_pool_init(dev, gfp_flags, type);
if (IS_ERR_OR_NULL(pool))
@@ -957,16 +954,16 @@ skip_huge:
}
while (num_pages) {
- d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
+ d_page = ttm_dma_pool_get_pages(pool, ttm, i);
if (!d_page) {
- ttm_dma_unpopulate(ttm_dma, dev);
+ ttm_dma_unpopulate(ttm, dev);
return -ENOMEM;
}
ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
pool->size, ctx);
if (unlikely(ret != 0)) {
- ttm_dma_unpopulate(ttm_dma, dev);
+ ttm_dma_unpopulate(ttm, dev);
return -ENOMEM;
}
@@ -980,10 +977,9 @@ skip_huge:
EXPORT_SYMBOL_GPL(ttm_dma_populate);
/* Put all pages in pages list to correct pool to wait for reuse */
-void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
+void ttm_dma_unpopulate(struct ttm_tt *ttm, struct device *dev)
{
struct ttm_mem_global *mem_glob = &ttm_mem_glob;
- struct ttm_tt *ttm = &ttm_dma->ttm;
struct dma_pool *pool;
struct dma_page *d_page, *next;
enum pool_type type;
@@ -997,7 +993,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
pool = ttm_dma_find_pool(dev, type | IS_HUGE);
if (pool) {
count = 0;
- list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
+ list_for_each_entry_safe(d_page, next, &ttm->pages_list,
page_list) {
if (!(d_page->vaddr & VADDR_FLAG_HUGE_POOL))
continue;
@@ -1027,7 +1023,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
/* make sure pages array match list and count number of pages */
count = 0;
- list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
+ list_for_each_entry_safe(d_page, next, &ttm->pages_list,
page_list) {
ttm->pages[count] = d_page->p;
count++;
@@ -1048,7 +1044,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
pool->nfrees += count;
} else {
pool->npages_free += count;
- list_splice(&ttm_dma->pages_list, &pool->free_list);
+ list_splice(&ttm->pages_list, &pool->free_list);
/*
* Wait to have at least NUM_PAGES_TO_ALLOC number of pages
* to free in order to minimize calls to set_memory_wb().
@@ -1059,10 +1055,10 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
}
spin_unlock_irqrestore(&pool->lock, irq_flags);
- INIT_LIST_HEAD(&ttm_dma->pages_list);
+ INIT_LIST_HEAD(&ttm->pages_list);
for (i = 0; i < ttm->num_pages; i++) {
ttm->pages[i] = NULL;
- ttm_dma->dma_address[i] = 0;
+ ttm->dma_address[i] = 0;
}
/* shrink pool if necessary (only on !is_cached pools)*/
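With the signature changes above, a driver sitting on the DMA pool no
longer unwraps a ttm_dma_tt before calling these helpers. A hedged sketch
of the resulting driver glue (mydrv_* names and the mydrv_dev() accessor
are invented stand-ins; the callback shape assumes the ttm_bo_driver
interface of this kernel series):

/* Invented driver glue; mydrv_dev() is a placeholder for however the
 * driver reaches its struct device. */
static int mydrv_ttm_tt_populate(struct ttm_bo_device *bdev,
				 struct ttm_tt *ttm,
				 struct ttm_operation_ctx *ctx)
{
	/* No more &tt->ttm unwrapping: pass the ttm_tt itself. */
	return ttm_dma_populate(ttm, mydrv_dev(bdev), ctx);
}

static void mydrv_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
				    struct ttm_tt *ttm)
{
	ttm_dma_unpopulate(ttm, mydrv_dev(bdev));
}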
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index dc1dad982f28..65c4254eea5c 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -92,21 +92,22 @@ static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
return 0;
}
-static int ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
+static int ttm_dma_tt_alloc_page_directory(struct ttm_tt *ttm)
{
- ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages,
- sizeof(*ttm->ttm.pages) +
- sizeof(*ttm->dma_address),
- GFP_KERNEL | __GFP_ZERO);
- if (!ttm->ttm.pages)
+ ttm->pages = kvmalloc_array(ttm->num_pages,
+ sizeof(*ttm->pages) +
+ sizeof(*ttm->dma_address),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!ttm->pages)
return -ENOMEM;
- ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
+
+ ttm->dma_address = (void *)(ttm->pages + ttm->num_pages);
return 0;
}
-static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
+static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm)
{
- ttm->dma_address = kvmalloc_array(ttm->ttm.num_pages,
+ ttm->dma_address = kvmalloc_array(ttm->num_pages,
sizeof(*ttm->dma_address),
GFP_KERNEL | __GFP_ZERO);
if (!ttm->dma_address)
@@ -138,8 +139,10 @@ static void ttm_tt_init_fields(struct ttm_tt *ttm,
ttm->num_pages = bo->num_pages;
ttm->caching = ttm_cached;
ttm->page_flags = page_flags;
+ ttm->dma_address = NULL;
ttm->swap_storage = NULL;
ttm->sg = bo->sg;
+ INIT_LIST_HEAD(&ttm->pages_list);
ttm->caching = caching;
}
@@ -158,20 +161,21 @@ EXPORT_SYMBOL(ttm_tt_init);
void ttm_tt_fini(struct ttm_tt *ttm)
{
- kvfree(ttm->pages);
+ if (ttm->pages)
+ kvfree(ttm->pages);
+ else
+ kvfree(ttm->dma_address);
ttm->pages = NULL;
+ ttm->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);
-int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
+int ttm_dma_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
uint32_t page_flags, enum ttm_caching caching)
{
- struct ttm_tt *ttm = &ttm_dma->ttm;
-
ttm_tt_init_fields(ttm, bo, page_flags, caching);
- INIT_LIST_HEAD(&ttm_dma->pages_list);
- if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
+ if (ttm_dma_tt_alloc_page_directory(ttm)) {
pr_err("Failed allocating page table\n");
return -ENOMEM;
}
@@ -179,19 +183,17 @@ int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_dma_tt_init);
-int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
+int ttm_sg_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
uint32_t page_flags, enum ttm_caching caching)
{
- struct ttm_tt *ttm = &ttm_dma->ttm;
int ret;
ttm_tt_init_fields(ttm, bo, page_flags, caching);
- INIT_LIST_HEAD(&ttm_dma->pages_list);
if (page_flags & TTM_PAGE_FLAG_SG)
- ret = ttm_sg_tt_alloc_page_directory(ttm_dma);
+ ret = ttm_sg_tt_alloc_page_directory(ttm);
else
- ret = ttm_dma_tt_alloc_page_directory(ttm_dma);
+ ret = ttm_dma_tt_alloc_page_directory(ttm);
if (ret) {
pr_err("Failed allocating page table\n");
return -ENOMEM;
@@ -200,19 +202,6 @@ int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_sg_tt_init);
-void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
-{
- struct ttm_tt *ttm = &ttm_dma->ttm;
-
- if (ttm->pages)
- kvfree(ttm->pages);
- else
- kvfree(ttm_dma->dma_address);
- ttm->pages = NULL;
- ttm_dma->dma_address = NULL;
-}
-EXPORT_SYMBOL(ttm_dma_tt_fini);
-
int ttm_tt_swapin(struct ttm_tt *ttm)
{
struct address_space *swap_space;
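Finally, ttm_dma_tt_alloc_page_directory() above grabs one buffer sized
for both arrays and points dma_address at its tail, which is why
ttm_tt_fini() frees ttm->pages when it is set and ttm->dma_address only in
the sg case, where the page directory was never allocated. A userspace
sketch of that single-allocation idiom (toy_* names are invented; calloc
stands in for kvmalloc_array with __GFP_ZERO):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* One zeroed buffer holds num_pages page pointers followed by
 * num_pages DMA addresses, so a single free() releases both,
 * mirroring kvfree(ttm->pages) in ttm_tt_fini(). */
struct toy_tt {
	void **pages;		/* start of the shared buffer */
	uint64_t *dma_address;	/* tail of the same buffer */
	unsigned long num_pages;
};

static int toy_alloc_page_directory(struct toy_tt *tt)
{
	tt->pages = calloc(tt->num_pages,
			   sizeof(*tt->pages) + sizeof(*tt->dma_address));
	if (!tt->pages)
		return -1;
	/* the DMA-address array starts right after the page pointers */
	tt->dma_address = (uint64_t *)(tt->pages + tt->num_pages);
	return 0;
}

int main(void)
{
	struct toy_tt tt = { .num_pages = 8 };

	if (toy_alloc_page_directory(&tt))
		return 1;
	printf("pages=%p dma_address=%p\n",
	       (void *)tt.pages, (void *)tt.dma_address);
	free(tt.pages); /* frees both arrays at once */
	return 0;
}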