author     Zack Rusin <zackr@vmware.com>  2021-11-05 22:38:45 +0300
committer  Zack Rusin <zackr@vmware.com>  2021-12-01 19:58:35 +0300
commit     f6be23264bbac88d1e2bb39658e1b8a397e3f46d (patch)
tree       2509fa3782425491c26f9c5e90ff9b584d2f70ed /drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
parent     c451af78f301ff5156998d571c37cab329c10051 (diff)
drm/vmwgfx: Introduce a new placement for MOB page tables
For larger (bigger than a page) and noncontiguous MOBs we have to create page tables that allow the host to find the memory. Those page tables used regular system memory. Unfortunately, in TTM such BOs are not allowed to be busy and thus can't be fenced, but we have to fence those BOs because we don't want to destroy the page tables while the host is still executing command buffers that might be accessing them.

To solve this we introduce a new placement, VMW_PL_SYSTEM, which is very similar to TTM_PL_SYSTEM except that it allows fencing.

This fixes kernel oopses during unloading of the driver (and PCI hot remove/add) which were caused by busy BOs in TTM_PL_SYSTEM being present in TTM's delayed-deletion list (the TTM_PL_SYSTEM manager is destroyed before the delayed deletions are executed).

Signed-off-by: Zack Rusin <zackr@vmware.com>
Reviewed-by: Martin Krastev <krastevm@vmware.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20211105193845.258816-5-zackr@vmware.com
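The hunks below add only the placement itself; the ttm_resource_manager that backs VMW_PL_SYSTEM is added elsewhere in this series. As a rough sketch of that side — assuming the TTM API of this kernel generation (two-argument ttm_resource_manager_init, num_busy_placement still present) and with the vmw_sys_man_* names assumed from the series rather than taken from this file — a fence-friendly system placement boils down to a use_tt manager with trivial alloc/free:

	/* Hedged sketch, not the verbatim series code: a minimal TT-backed
	 * resource manager for a driver-private system placement. */
	#include <linux/slab.h>
	#include <drm/ttm/ttm_resource.h>
	#include "vmwgfx_drv.h"

	static int vmw_sys_man_alloc(struct ttm_resource_manager *man,
				     struct ttm_buffer_object *bo,
				     const struct ttm_place *place,
				     struct ttm_resource **res)
	{
		*res = kzalloc(sizeof(**res), GFP_KERNEL);
		if (!*res)
			return -ENOMEM;

		ttm_resource_init(bo, place, *res);
		return 0;
	}

	static void vmw_sys_man_free(struct ttm_resource_manager *man,
				     struct ttm_resource *res)
	{
		kfree(res);
	}

	static const struct ttm_resource_manager_func vmw_sys_manager_func = {
		.alloc = vmw_sys_man_alloc,
		.free = vmw_sys_man_free,
	};

	int vmw_sys_man_init(struct vmw_private *dev_priv)
	{
		struct ttm_resource_manager *man = kzalloc(sizeof(*man), GFP_KERNEL);

		if (!man)
			return -ENOMEM;

		/* use_tt is the point of the exercise: BOs placed here get a
		 * populated ttm_tt, may be busy, and can therefore be fenced,
		 * which plain TTM_PL_SYSTEM does not allow. */
		man->use_tt = true;
		man->func = &vmw_sys_manager_func;

		ttm_resource_manager_init(man, 0);
		ttm_set_driver_manager(&dev_priv->bdev, VMW_PL_SYSTEM, man);
		ttm_resource_manager_set_used(man, true);
		return 0;
	}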
Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c')
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c | 58
1 file changed, 26 insertions(+), 32 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index e899a936a42a..b15228e7dbeb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -92,6 +92,13 @@ static const struct ttm_place gmr_vram_placement_flags[] = {
 	}
 };
 
+static const struct ttm_place vmw_sys_placement_flags = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.mem_type = VMW_PL_SYSTEM,
+	.flags = 0
+};
+
 struct ttm_placement vmw_vram_gmr_placement = {
 	.num_placement = 2,
 	.placement = vram_gmr_placement_flags,
@@ -113,28 +120,11 @@ struct ttm_placement vmw_sys_placement = {
 	.busy_placement = &sys_placement_flags
 };
 
-static const struct ttm_place evictable_placement_flags[] = {
-	{
-		.fpfn = 0,
-		.lpfn = 0,
-		.mem_type = TTM_PL_SYSTEM,
-		.flags = 0
-	}, {
-		.fpfn = 0,
-		.lpfn = 0,
-		.mem_type = TTM_PL_VRAM,
-		.flags = 0
-	}, {
-		.fpfn = 0,
-		.lpfn = 0,
-		.mem_type = VMW_PL_GMR,
-		.flags = 0
-	}, {
-		.fpfn = 0,
-		.lpfn = 0,
-		.mem_type = VMW_PL_MOB,
-		.flags = 0
-	}
+struct ttm_placement vmw_pt_sys_placement = {
+	.num_placement = 1,
+	.placement = &vmw_sys_placement_flags,
+	.num_busy_placement = 1,
+	.busy_placement = &vmw_sys_placement_flags
 };
 
 static const struct ttm_place nonfixed_placement_flags[] = {
@@ -156,13 +146,6 @@ static const struct ttm_place nonfixed_placement_flags[] = {
 	}
 };
 
-struct ttm_placement vmw_evictable_placement = {
-	.num_placement = 4,
-	.placement = evictable_placement_flags,
-	.num_busy_placement = 1,
-	.busy_placement = &sys_placement_flags
-};
-
 struct ttm_placement vmw_srf_placement = {
 	.num_placement = 1,
 	.num_busy_placement = 2,
@@ -484,6 +467,9 @@ static int vmw_ttm_bind(struct ttm_device *bdev,
 				   &vmw_be->vsgt, ttm->num_pages,
 				   vmw_be->gmr_id);
 		break;
+	case VMW_PL_SYSTEM:
+		/* Nothing to be done for a system bind */
+		break;
 	default:
 		BUG();
 	}
@@ -507,6 +493,8 @@ static void vmw_ttm_unbind(struct ttm_device *bdev,
 	case VMW_PL_MOB:
 		vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
 		break;
+	case VMW_PL_SYSTEM:
+		break;
 	default:
 		BUG();
 	}
@@ -624,6 +612,7 @@ static int vmw_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *
 
 	switch (mem->mem_type) {
 	case TTM_PL_SYSTEM:
+	case VMW_PL_SYSTEM:
 	case VMW_PL_GMR:
 	case VMW_PL_MOB:
 		return 0;
@@ -670,6 +659,11 @@ static void vmw_swap_notify(struct ttm_buffer_object *bo)
 	(void) ttm_bo_wait(bo, false, false);
 }
 
+static bool vmw_memtype_is_system(uint32_t mem_type)
+{
+	return mem_type == TTM_PL_SYSTEM || mem_type == VMW_PL_SYSTEM;
+}
+
 static int vmw_move(struct ttm_buffer_object *bo,
 		    bool evict,
 		    struct ttm_operation_ctx *ctx,
@@ -680,7 +674,7 @@ static int vmw_move(struct ttm_buffer_object *bo,
 	struct ttm_resource_manager *new_man = ttm_manager_type(bo->bdev, new_mem->mem_type);
 	int ret;
 
-	if (new_man->use_tt && new_mem->mem_type != TTM_PL_SYSTEM) {
+	if (new_man->use_tt && !vmw_memtype_is_system(new_mem->mem_type)) {
 		ret = vmw_ttm_bind(bo->bdev, bo->ttm, new_mem);
 		if (ret)
 			return ret;
@@ -689,7 +683,7 @@ static int vmw_move(struct ttm_buffer_object *bo,
 	vmw_move_notify(bo, bo->resource, new_mem);
 
 	if (old_man->use_tt && new_man->use_tt) {
-		if (bo->resource->mem_type == TTM_PL_SYSTEM) {
+		if (vmw_memtype_is_system(bo->resource->mem_type)) {
 			ttm_bo_move_null(bo, new_mem);
 			return 0;
 		}
@@ -736,7 +730,7 @@ int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
 	int ret;
 
 	ret = vmw_bo_create_kernel(dev_priv, bo_size,
-				   &vmw_sys_placement,
+				   &vmw_pt_sys_placement,
 				   &bo);
 	if (unlikely(ret != 0))
 		return ret;
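Since the oopses fixed here were teardown-ordering failures (the TTM_PL_SYSTEM manager is gone before delayed deletions run), the gain from a driver-private manager is that the driver picks the teardown point. A hedged sketch of that unwind, again with the vmw_sys_man_fini name assumed from the series rather than taken from this file:

	/* Hedged sketch: drain and destroy the private manager at a point of
	 * the driver's choosing, after outstanding fences have signaled. */
	void vmw_sys_man_fini(struct vmw_private *dev_priv)
	{
		struct ttm_resource_manager *man =
			ttm_manager_type(&dev_priv->bdev, VMW_PL_SYSTEM);

		/* Stop new allocations, then evict what is still resident;
		 * eviction may wait on fences, which VMW_PL_SYSTEM permits. */
		ttm_resource_manager_set_used(man, false);
		ttm_resource_manager_evict_all(&dev_priv->bdev, man);

		ttm_resource_manager_cleanup(man);
		ttm_set_driver_manager(&dev_priv->bdev, VMW_PL_SYSTEM, NULL);
		kfree(man);
	}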