Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c')
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 396 ++++++++++++------------------
 1 file changed, 159 insertions(+), 237 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index a0248d78190f..4d8f19ab1014 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -47,7 +47,6 @@
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_module.h>
-#include <drm/ttm/ttm_page_alloc.h>
#include <drm/drm_debugfs.h>
#include <drm/amdgpu_drm.h>
@@ -66,6 +65,8 @@
static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
struct ttm_tt *ttm,
struct ttm_resource *bo_mem);
+static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm);
static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
unsigned int type,
@@ -92,7 +93,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
.fpfn = 0,
.lpfn = 0,
.mem_type = TTM_PL_SYSTEM,
- .flags = TTM_PL_MASK_CACHING
+ .flags = 0
};
/* Don't handle scatter gather BOs */
@@ -292,11 +293,9 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
cpu_addr = &job->ibs[0].ptr[num_dw];
if (mem->mem_type == TTM_PL_TT) {
- struct ttm_dma_tt *dma;
dma_addr_t *dma_address;
- dma = container_of(bo->ttm, struct ttm_dma_tt, ttm);
- dma_address = &dma->dma_address[offset >> PAGE_SHIFT];
+ dma_address = &bo->ttm->dma_address[offset >> PAGE_SHIFT];
r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
cpu_addr);
if (r)
@@ -452,7 +451,7 @@ error:
return r;
}
-/**
+/*
* amdgpu_move_blit - Copy an entire buffer to another buffer
*
* This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to
@@ -513,116 +512,7 @@ error:
return r;
}
-/**
- * amdgpu_move_vram_ram - Copy VRAM buffer to RAM buffer
- *
- * Called by amdgpu_bo_move().
- */
-static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
- struct ttm_operation_ctx *ctx,
- struct ttm_resource *new_mem)
-{
- struct ttm_resource *old_mem = &bo->mem;
- struct ttm_resource tmp_mem;
- struct ttm_place placements;
- struct ttm_placement placement;
- int r;
-
- /* create space/pages for new_mem in GTT space */
- tmp_mem = *new_mem;
- tmp_mem.mm_node = NULL;
- placement.num_placement = 1;
- placement.placement = &placements;
- placement.num_busy_placement = 1;
- placement.busy_placement = &placements;
- placements.fpfn = 0;
- placements.lpfn = 0;
- placements.mem_type = TTM_PL_TT;
- placements.flags = TTM_PL_MASK_CACHING;
- r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
- if (unlikely(r)) {
- pr_err("Failed to find GTT space for blit from VRAM\n");
- return r;
- }
-
- /* set caching flags */
- r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
- if (unlikely(r)) {
- goto out_cleanup;
- }
-
- r = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
- if (unlikely(r))
- goto out_cleanup;
-
- /* Bind the memory to the GTT space */
- r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, &tmp_mem);
- if (unlikely(r)) {
- goto out_cleanup;
- }
-
- /* blit VRAM to GTT */
- r = amdgpu_move_blit(bo, evict, &tmp_mem, old_mem);
- if (unlikely(r)) {
- goto out_cleanup;
- }
-
- /* move BO (in tmp_mem) to new_mem */
- r = ttm_bo_move_ttm(bo, ctx, new_mem);
-out_cleanup:
- ttm_resource_free(bo, &tmp_mem);
- return r;
-}
-
-/**
- * amdgpu_move_ram_vram - Copy buffer from RAM to VRAM
- *
- * Called by amdgpu_bo_move().
- */
-static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
- struct ttm_operation_ctx *ctx,
- struct ttm_resource *new_mem)
-{
- struct ttm_resource *old_mem = &bo->mem;
- struct ttm_resource tmp_mem;
- struct ttm_placement placement;
- struct ttm_place placements;
- int r;
-
- /* make space in GTT for old_mem buffer */
- tmp_mem = *new_mem;
- tmp_mem.mm_node = NULL;
- placement.num_placement = 1;
- placement.placement = &placements;
- placement.num_busy_placement = 1;
- placement.busy_placement = &placements;
- placements.fpfn = 0;
- placements.lpfn = 0;
- placements.mem_type = TTM_PL_TT;
- placements.flags = TTM_PL_MASK_CACHING;
- r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
- if (unlikely(r)) {
- pr_err("Failed to find GTT space for blit to VRAM\n");
- return r;
- }
-
- /* move/bind old memory to GTT space */
- r = ttm_bo_move_ttm(bo, ctx, &tmp_mem);
- if (unlikely(r)) {
- goto out_cleanup;
- }
-
- /* copy to VRAM */
- r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
- if (unlikely(r)) {
- goto out_cleanup;
- }
-out_cleanup:
- ttm_resource_free(bo, &tmp_mem);
- return r;
-}
-
-/**
+/*
* amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy
*
* Called by amdgpu_bo_move()
@@ -646,39 +536,55 @@ static bool amdgpu_mem_visible(struct amdgpu_device *adev,
<= adev->gmc.visible_vram_size;
}
-/**
+/*
* amdgpu_bo_move - Move a buffer object to a new memory location
*
* Called by ttm_bo_handle_move_mem()
*/
static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
struct ttm_operation_ctx *ctx,
- struct ttm_resource *new_mem)
+ struct ttm_resource *new_mem,
+ struct ttm_place *hop)
{
struct amdgpu_device *adev;
struct amdgpu_bo *abo;
struct ttm_resource *old_mem = &bo->mem;
int r;
+ if (new_mem->mem_type == TTM_PL_TT) {
+ r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem);
+ if (r)
+ return r;
+ }
+
/* Can't move a pinned BO */
abo = ttm_to_amdgpu_bo(bo);
- if (WARN_ON_ONCE(abo->pin_count > 0))
+ if (WARN_ON_ONCE(abo->tbo.pin_count > 0))
return -EINVAL;
adev = amdgpu_ttm_adev(bo->bdev);
if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
ttm_bo_move_null(bo, new_mem);
- return 0;
+ goto out;
}
- if ((old_mem->mem_type == TTM_PL_TT &&
- new_mem->mem_type == TTM_PL_SYSTEM) ||
- (old_mem->mem_type == TTM_PL_SYSTEM &&
- new_mem->mem_type == TTM_PL_TT)) {
- /* bind is enough */
+ if (old_mem->mem_type == TTM_PL_SYSTEM &&
+ new_mem->mem_type == TTM_PL_TT) {
ttm_bo_move_null(bo, new_mem);
- return 0;
+ goto out;
+ }
+ if (old_mem->mem_type == TTM_PL_TT &&
+ new_mem->mem_type == TTM_PL_SYSTEM) {
+ r = ttm_bo_wait_ctx(bo, ctx);
+ if (r)
+ return r;
+
+ amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
+ ttm_resource_free(bo, &bo->mem);
+ ttm_bo_assign_mem(bo, new_mem);
+ goto out;
}
+
if (old_mem->mem_type == AMDGPU_PL_GDS ||
old_mem->mem_type == AMDGPU_PL_GWS ||
old_mem->mem_type == AMDGPU_PL_OA ||
@@ -687,27 +593,27 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
new_mem->mem_type == AMDGPU_PL_OA) {
/* Nothing to save here */
ttm_bo_move_null(bo, new_mem);
- return 0;
+ goto out;
}
- if (!adev->mman.buffer_funcs_enabled) {
- r = -ENODEV;
- goto memcpy;
- }
+ if (adev->mman.buffer_funcs_enabled) {
+ if (((old_mem->mem_type == TTM_PL_SYSTEM &&
+ new_mem->mem_type == TTM_PL_VRAM) ||
+ (old_mem->mem_type == TTM_PL_VRAM &&
+ new_mem->mem_type == TTM_PL_SYSTEM))) {
+ hop->fpfn = 0;
+ hop->lpfn = 0;
+ hop->mem_type = TTM_PL_TT;
+ hop->flags = 0;
+ return -EMULTIHOP;
+ }
- if (old_mem->mem_type == TTM_PL_VRAM &&
- new_mem->mem_type == TTM_PL_SYSTEM) {
- r = amdgpu_move_vram_ram(bo, evict, ctx, new_mem);
- } else if (old_mem->mem_type == TTM_PL_SYSTEM &&
- new_mem->mem_type == TTM_PL_VRAM) {
- r = amdgpu_move_ram_vram(bo, evict, ctx, new_mem);
+ r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
} else {
- r = amdgpu_move_blit(bo, evict,
- new_mem, old_mem);
+ r = -ENODEV;
}
if (r) {
-memcpy:
/* Check that all memory is CPU accessible */
if (!amdgpu_mem_visible(adev, old_mem) ||
!amdgpu_mem_visible(adev, new_mem)) {
@@ -729,12 +635,14 @@ memcpy:
abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
}
+out:
/* update statistics */
atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
+ amdgpu_bo_move_notify(bo, evict, new_mem);
return 0;
}
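
The dedicated VRAM<->system bounce helpers were removed because the TTM core now drives multi-hop moves: amdgpu_bo_move() fills in *hop and returns -EMULTIHOP, and the core redoes the move through the intermediate GTT placement. A minimal sketch of the caller side of that contract, assuming a simplified core loop (example_handle_move and example_bounce_move are illustrative names, not the real ttm_bo.c code):

/* Illustrative caller-side handling of -EMULTIHOP; the real logic lives
 * in the TTM core (ttm_bo.c), this only shows the contract that the new
 * ->move() signature above relies on.
 */
static int example_handle_move(struct ttm_buffer_object *bo, bool evict,
			       struct ttm_operation_ctx *ctx,
			       struct ttm_resource *new_mem)
{
	struct ttm_place hop = {};
	int r;

	r = bo->bdev->driver->move(bo, evict, ctx, new_mem, &hop);
	if (r != -EMULTIHOP)
		return r;

	/* Bounce through the placement the driver requested (TTM_PL_TT
	 * above), then retry the original move.
	 */
	r = example_bounce_move(bo, ctx, &hop);	/* hypothetical helper */
	if (r)
		return r;
	return example_handle_move(bo, evict, ctx, new_mem);
}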
-/**
+/*
* amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault
*
* Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
@@ -767,6 +675,7 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_reso
mem->bus.offset += adev->gmc.aper_base;
mem->bus.is_iomem = true;
+ mem->bus.caching = ttm_write_combined;
break;
default:
return -EINVAL;
@@ -811,7 +720,7 @@ uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type)
* TTM backend functions.
*/
struct amdgpu_ttm_tt {
- struct ttm_dma_tt ttm;
+ struct ttm_tt ttm;
struct drm_gem_object *gobj;
u64 offset;
uint64_t userptr;
@@ -824,7 +733,7 @@ struct amdgpu_ttm_tt {
};
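
The switch from struct ttm_dma_tt to struct ttm_tt works because this kernel folds the DMA address array into ttm_tt itself, which is what lets the container_of() dance in amdgpu_ttm_map_buffer() and the gtt->ttm.ttm chains below collapse. Roughly (a simplified sketch, not verbatim; see include/drm/ttm/ttm_tt.h for the authoritative definition):

/* Simplified sketch of the consolidated ttm_tt, for orientation only. */
struct example_ttm_tt {
	struct page **pages;	/* backing system pages */
	uint32_t page_flags;
	uint32_t num_pages;	/* unsigned int now, hence the %lu -> %u printk fixes */
	struct sg_table *sg;
	dma_addr_t *dma_address;	/* formerly struct ttm_dma_tt::dma_address */
	enum ttm_caching caching;	/* formerly TTM_PL_MASK_CACHING placement bits */
};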
#ifdef CONFIG_DRM_AMDGPU_USERPTR
-/**
+/*
* amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
* memory and start HMM tracking CPU page table update
*
@@ -929,7 +838,7 @@ out:
return r;
}
-/**
+/*
* amdgpu_ttm_tt_userptr_range_done - stop HMM track the CPU page table change
* Check if the pages backing this ttm range have been invalidated
*
@@ -943,7 +852,7 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
if (!gtt || !gtt->userptr)
return false;
- DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%lx\n",
+ DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n",
gtt->userptr, ttm->num_pages);
WARN_ONCE(!gtt->range || !gtt->range->hmm_pfns,
@@ -965,7 +874,7 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
}
#endif
-/**
+/*
* amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary.
*
* Called by amdgpu_cs_list_validate(). This creates the page list
@@ -980,8 +889,8 @@ void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
ttm->pages[i] = pages ? pages[i] : NULL;
}
-/**
- * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
+/*
+ * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
*
* Called by amdgpu_ttm_backend_bind()
**/
@@ -1020,7 +929,7 @@ release_sg:
return r;
}
-/**
+/*
* amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
*/
static void amdgpu_ttm_tt_unpin_userptr(struct ttm_bo_device *bdev,
@@ -1095,13 +1004,13 @@ static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
gart_bind_fail:
if (r)
- DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
+ DRM_ERROR("failed to bind %u pages at 0x%08llX\n",
ttm->num_pages, gtt->offset);
return r;
}
-/**
+/*
* amdgpu_ttm_backend_bind - Bind GTT memory
*
* Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
@@ -1130,7 +1039,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
}
}
if (!ttm->num_pages) {
- WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
+ WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
ttm->num_pages, bo_mem, ttm);
}
@@ -1153,13 +1062,13 @@ static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
ttm->pages, gtt->ttm.dma_address, flags);
if (r)
- DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
+ DRM_ERROR("failed to bind %u pages at 0x%08llX\n",
ttm->num_pages, gtt->offset);
gtt->bound = true;
return r;
}
-/**
+/*
* amdgpu_ttm_alloc_gart - Make sure buffer object is accessible either
* through AGP or GART aperture.
*
@@ -1171,7 +1080,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_operation_ctx ctx = { false, false };
- struct amdgpu_ttm_tt *gtt = (void*)bo->ttm;
+ struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
struct ttm_resource tmp;
struct ttm_placement placement;
struct ttm_place placements;
@@ -1220,7 +1129,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
return 0;
}
-/**
+/*
* amdgpu_ttm_recover_gart - Rebind GTT pages
*
* Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
@@ -1241,7 +1150,7 @@ int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
return r;
}
-/**
+/*
* amdgpu_ttm_backend_unbind - Unbind GTT mapped pages
*
* Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
@@ -1267,8 +1176,8 @@ static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
if (r)
- DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
- gtt->ttm.ttm.num_pages, gtt->offset);
+ DRM_ERROR("failed to unbind %u pages at 0x%08llX\n",
+ gtt->ttm.num_pages, gtt->offset);
gtt->bound = false;
}
@@ -1282,7 +1191,7 @@ static void amdgpu_ttm_backend_destroy(struct ttm_bo_device *bdev,
if (gtt->usertask)
put_task_struct(gtt->usertask);
- ttm_dma_tt_fini(&gtt->ttm);
+ ttm_tt_fini(&gtt->ttm);
kfree(gtt);
}
@@ -1290,13 +1199,16 @@ static void amdgpu_ttm_backend_destroy(struct ttm_bo_device *bdev,
* amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
*
* @bo: The buffer object to create a GTT ttm_tt object around
+ * @page_flags: Page flags to be added to the ttm_tt object
*
* Called by ttm_tt_create().
*/
static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
uint32_t page_flags)
{
+ struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
struct amdgpu_ttm_tt *gtt;
+ enum ttm_caching caching;
gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
if (gtt == NULL) {
@@ -1304,15 +1216,20 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
}
gtt->gobj = &bo->base;
+ if (abo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
+ caching = ttm_write_combined;
+ else
+ caching = ttm_cached;
+
/* allocate space for the uninitialized page entries */
- if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) {
+ if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, caching)) {
kfree(gtt);
return NULL;
}
- return &gtt->ttm.ttm;
+ return &gtt->ttm;
}
-/**
+/*
* amdgpu_ttm_tt_populate - Map GTT pages visible to the device
*
* Map the pages of a ttm_tt object to an address space visible
@@ -1332,7 +1249,6 @@ static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
return -ENOMEM;
ttm->page_flags |= TTM_PAGE_FLAG_SG;
- ttm_tt_set_populated(ttm);
return 0;
}
@@ -1352,28 +1268,20 @@ static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
gtt->ttm.dma_address,
ttm->num_pages);
- ttm_tt_set_populated(ttm);
return 0;
}
-#ifdef CONFIG_SWIOTLB
- if (adev->need_swiotlb && swiotlb_nr_tbl()) {
- return ttm_dma_populate(&gtt->ttm, adev->dev, ctx);
- }
-#endif
-
- /* fall back to generic helper to populate the page array
- * and map them to the device */
- return ttm_populate_and_map_pages(adev->dev, &gtt->ttm, ctx);
+ return ttm_pool_alloc(&adev->mman.bdev.pool, ttm, ctx);
}
-/**
+/*
* amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays
*
* Unmaps pages of a ttm_tt object from the device address space and
* unpopulates the page array backing it.
*/
-static void amdgpu_ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
+static void amdgpu_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
struct amdgpu_device *adev;
@@ -1398,16 +1306,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *
return;
adev = amdgpu_ttm_adev(bdev);
-
-#ifdef CONFIG_SWIOTLB
- if (adev->need_swiotlb && swiotlb_nr_tbl()) {
- ttm_dma_unpopulate(&gtt->ttm, adev->dev);
- return;
- }
-#endif
-
- /* fall back to generic helper to unmap and unpopulate array */
- ttm_unmap_and_unpopulate_pages(adev->dev, &gtt->ttm);
+ return ttm_pool_free(&adev->mman.bdev.pool, ttm);
}
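
Both callbacks now end in the per-device page pool that ttm_bo_device_init() sets up (see the amdgpu_ttm_init hunk further down), replacing the CONFIG_SWIOTLB special cases and the ttm_populate_and_map_pages() fallback. For a driver without userptr or imported-SG paths, the whole pair could plausibly shrink to this sketch (example_* names are illustrative):

static int example_tt_populate(struct ttm_bo_device *bdev,
			       struct ttm_tt *ttm,
			       struct ttm_operation_ctx *ctx)
{
	/* allocate pages and DMA-map them from the device's pool */
	return ttm_pool_alloc(&bdev->pool, ttm, ctx);
}

static void example_tt_unpopulate(struct ttm_bo_device *bdev,
				  struct ttm_tt *ttm)
{
	/* unmap and return the pages to the pool */
	ttm_pool_free(&bdev->pool, ttm);
}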
/**
@@ -1433,7 +1332,7 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
return -ENOMEM;
}
- gtt = (void*)bo->ttm;
+ gtt = (void *)bo->ttm;
gtt->userptr = addr;
gtt->userflags = flags;
@@ -1445,7 +1344,7 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
return 0;
}
-/**
+/*
* amdgpu_ttm_tt_get_usermm - Return memory manager for ttm_tt object
*/
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
@@ -1461,7 +1360,7 @@ struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
return gtt->usertask->mm;
}
-/**
+/*
* amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lays inside an
* address range for the current task.
*
@@ -1478,14 +1377,14 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
/* Return false if no part of the ttm_tt object lies within
* the range
*/
- size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
+ size = (unsigned long)gtt->ttm.num_pages * PAGE_SIZE;
if (gtt->userptr > end || gtt->userptr + size <= start)
return false;
return true;
}
-/**
+/*
* amdgpu_ttm_tt_is_userptr - Have the pages backing by userptr?
*/
bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
@@ -1498,7 +1397,7 @@ bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
return true;
}
-/**
+/*
* amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only?
*/
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
@@ -1529,7 +1428,7 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
if (mem && mem->mem_type == TTM_PL_TT) {
flags |= AMDGPU_PTE_SYSTEM;
- if (ttm->caching_state == tt_cached)
+ if (ttm->caching == ttm_cached)
flags |= AMDGPU_PTE_SNOOPED;
}
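
Caching is now a single enum ttm_caching stored on the ttm_tt (chosen in amdgpu_ttm_tt_create() from the AMDGPU_GEM_CREATE_CPU_GTT_USWC flag) rather than TTM_PL_MASK_CACHING placement bits, which is also why the placements earlier in this patch now pass .flags = 0. A small illustrative sketch connecting the two hunks (example_gtt_pte_flags is not driver code):

/* Hypothetical helper: the USWC choice made at ttm_tt creation decides
 * whether GTT PTEs get the SNOOPED bit here in amdgpu_ttm_tt_pde_flags().
 */
static uint64_t example_gtt_pte_flags(bool uswc)
{
	enum ttm_caching caching = uswc ? ttm_write_combined : ttm_cached;
	uint64_t flags = AMDGPU_PTE_SYSTEM;

	if (caching == ttm_cached)	/* CPU-cached pages must be snooped */
		flags |= AMDGPU_PTE_SNOOPED;
	return flags;
}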
@@ -1539,9 +1438,10 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
/**
* amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object
*
+ * @adev: amdgpu_device pointer
* @ttm: The ttm_tt object to compute the flags for
* @mem: The memory registry backing this ttm_tt object
-
+ *
* Figure out the flags to use for a VM PTE (Page Table Entry).
*/
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
@@ -1558,7 +1458,7 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
return flags;
}
-/**
+/*
* amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer
* object.
*
@@ -1699,20 +1599,23 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
return ret;
}
+static void
+amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
+{
+ amdgpu_bo_move_notify(bo, false, NULL);
+}
+
static struct ttm_bo_driver amdgpu_bo_driver = {
.ttm_tt_create = &amdgpu_ttm_tt_create,
.ttm_tt_populate = &amdgpu_ttm_tt_populate,
.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
- .ttm_tt_bind = &amdgpu_ttm_backend_bind,
- .ttm_tt_unbind = &amdgpu_ttm_backend_unbind,
.ttm_tt_destroy = &amdgpu_ttm_backend_destroy,
.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
.evict_flags = &amdgpu_evict_flags,
.move = &amdgpu_bo_move,
.verify_access = &amdgpu_verify_access,
- .move_notify = &amdgpu_bo_move_notify,
+ .delete_mem_notify = &amdgpu_bo_delete_mem_notify,
.release_notify = &amdgpu_bo_release_notify,
- .fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
.io_mem_pfn = amdgpu_ttm_io_mem_pfn,
.access_memory = &amdgpu_ttm_access_memory,
@@ -1866,7 +1769,7 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
return 0;
}
-/**
+/*
* amdgpu_ttm_init - Init the memory management (ttm) as well as various
* gtt/vram related fields.
*
@@ -1884,10 +1787,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
mutex_init(&adev->mman.gtt_window_lock);
/* No others user of address space so set it to 0 */
- r = ttm_bo_device_init(&adev->mman.bdev,
- &amdgpu_bo_driver,
+ r = ttm_bo_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
adev_to_drm(adev)->anon_inode->i_mapping,
adev_to_drm(adev)->vma_offset_manager,
+ adev->need_swiotlb,
dma_addressing_limited(adev->dev));
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
@@ -1895,9 +1798,6 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
}
adev->mman.initialized = true;
- /* We opt to avoid OOM on system pages allocations */
- adev->mman.bdev.no_retry = true;
-
/* Initialize VRAM pool with all of VRAM divided into pages */
r = amdgpu_vram_mgr_init(adev);
if (r) {
@@ -2003,18 +1903,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
return 0;
}
-/**
- * amdgpu_ttm_late_init - Handle any late initialization for amdgpu_ttm
- */
-void amdgpu_ttm_late_init(struct amdgpu_device *adev)
-{
- /* return the VGA stolen memory (if any) back to VRAM */
- if (!adev->mman.keep_stolen_vga_memory)
- amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
- amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
-}
-
-/**
+/*
* amdgpu_ttm_fini - De-initialize the TTM memory pools
*/
void amdgpu_ttm_fini(struct amdgpu_device *adev)
@@ -2024,8 +1913,8 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
amdgpu_ttm_training_reserve_vram_fini(adev);
/* return the stolen vga memory back to VRAM */
- if (adev->mman.keep_stolen_vga_memory)
- amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
/* return the IP Discovery TMR memory back to VRAM */
amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
amdgpu_ttm_fw_reserve_vram_fini(adev);
@@ -2092,15 +1981,48 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
adev->mman.buffer_funcs_enabled = enable;
}
+static vm_fault_t amdgpu_ttm_fault(struct vm_fault *vmf)
+{
+ struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
+ vm_fault_t ret;
+
+ ret = ttm_bo_vm_reserve(bo, vmf);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_bo_fault_reserve_notify(bo);
+ if (ret)
+ goto unlock;
+
+ ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
+ TTM_BO_VM_NUM_PREFAULT, 1);
+ if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+ return ret;
+
+unlock:
+ dma_resv_unlock(bo->base.resv);
+ return ret;
+}
+
+static struct vm_operations_struct amdgpu_ttm_vm_ops = {
+ .fault = amdgpu_ttm_fault,
+ .open = ttm_bo_vm_open,
+ .close = ttm_bo_vm_close,
+ .access = ttm_bo_vm_access
+};
+
int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *file_priv = filp->private_data;
struct amdgpu_device *adev = drm_to_adev(file_priv->minor->dev);
+ int r;
- if (adev == NULL)
- return -EINVAL;
+ r = ttm_bo_mmap(filp, vma, &adev->mman.bdev);
+ if (unlikely(r != 0))
+ return r;
- return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
+ vma->vm_ops = &amdgpu_ttm_vm_ops;
+ return 0;
}
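
amdgpu now open-codes the generic fault path so it can run amdgpu_bo_fault_reserve_notify() between reserving the BO and inserting PTEs, replacing the removed .fault_reserve_notify driver callback. Note the asymmetric exit: when ttm_bo_vm_fault_reserved() returns VM_FAULT_RETRY without FAULT_FLAG_RETRY_NOWAIT it has already dropped the reservation, so only the remaining paths unlock. A driver that needs no per-fault hook could presumably keep the stock helper instead (sketch):

/* For contrast (illustrative): without a per-fault hook, the generic
 * ttm_bo_vm_fault() already does reserve + fault + unlock in one call.
 */
static const struct vm_operations_struct example_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access
};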
int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
@@ -2284,19 +2206,25 @@ static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
return 0;
}
+static int amdgpu_ttm_pool_debugfs(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+
+ return ttm_pool_debugfs(&adev->mman.bdev.pool, m);
+}
+
static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
{"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_VRAM},
{"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_TT},
{"amdgpu_gds_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GDS},
{"amdgpu_gws_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GWS},
{"amdgpu_oa_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_OA},
- {"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
-#ifdef CONFIG_SWIOTLB
- {"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
-#endif
+ {"ttm_page_pool", amdgpu_ttm_pool_debugfs, 0, NULL},
};
-/**
+/*
* amdgpu_ttm_vram_read - Linear read access to VRAM
*
* Accesses VRAM via MMIO for debugging purposes.
@@ -2331,7 +2259,7 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
return result;
}
-/**
+/*
* amdgpu_ttm_vram_write - Linear write access to VRAM
*
* Accesses VRAM via MMIO for debugging purposes.
@@ -2384,7 +2312,7 @@ static const struct file_operations amdgpu_ttm_vram_fops = {
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
-/**
+/*
* amdgpu_ttm_gtt_read - Linear read access to GTT memory
*/
static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
@@ -2434,7 +2362,7 @@ static const struct file_operations amdgpu_ttm_gtt_fops = {
#endif
-/**
+/*
* amdgpu_iomem_read - Virtual read access to GPU mapped memory
*
* This function is used to read memory that has been mapped to the
@@ -2490,7 +2418,7 @@ static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
return result;
}
-/**
+/*
* amdgpu_iomem_write - Virtual write access to GPU mapped memory
*
* This function is used to write memory that has been mapped to the
@@ -2586,12 +2514,6 @@ int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
}
count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);
-
-#ifdef CONFIG_SWIOTLB
- if (!(adev->need_swiotlb && swiotlb_nr_tbl()))
- --count;
-#endif
-
return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
#else
return 0;