author		Thomas Hellström <thomas.hellstrom@linux.intel.com>	2023-09-15 20:26:06 +0300
committer	Rodrigo Vivi <rodrigo.vivi@intel.com>	2023-12-21 19:41:14 +0300
commit		fc678ec7c2e037fcc1bb678403036a9772e61dbd (patch)
tree		be2efd3db1ffd47e04c1e284ab364216bc183429 /drivers/gpu/drm/xe/xe_bo.c
parent		a455ed04669f03bbb1f22267f1237983e026739f (diff)
download	linux-fc678ec7c2e037fcc1bb678403036a9772e61dbd.tar.xz
drm/xe: Reinstate pipelined fence enable_signaling
With the GPUVA conversion, the xe_bo::vmas member was replaced with
drm_gem_object::gpuva.list, but a couple of usage instances were left
on the old member, most notably the pipelined fence enable_signaling.

Remove the xe_bo::vmas member completely, fix the remaining usage
instances and also enable this pipelined fence enable_signaling even
for faulting VMs, since we actually wait for bind fences to complete.

v2:
- Rebase.
v3:
- Fix display code build error.

Cc: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230915172606.14436-1-thomas.hellstrom@linux.intel.com
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
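[Editor's note] In short, the member move described above looks roughly
like this (layouts abridged; `gpuva` is the embedding the GPUVA
conversion added to the common GEM object, reached from an xe BO as
bo->ttm.base.gpuva.list):

	/* Common GEM object: the GPUVA conversion added this embedding,
	 * so mappings are tracked generically rather than per driver. */
	struct drm_gem_object {
		/* ... */
		struct {
			struct list_head list;	/* drm_gpuva mappings of this object */
		} gpuva;
	};

	/* xe BO before this patch: a now-stale private duplicate. */
	struct xe_bo {
		struct ttm_buffer_object ttm;	/* ttm.base is the drm_gem_object */
		struct list_head vmas;		/* removed by this patch */
		/* ... */
	};

Hence every `&bo->vmas` below becomes `&bo->ttm.base.gpuva.list` (or
`&ttm_bo->base.gpuva.list` where only the TTM object is at hand).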
Diffstat (limited to 'drivers/gpu/drm/xe/xe_bo.c')
-rw-r--r--	drivers/gpu/drm/xe/xe_bo.c	5
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 998efceb84a4..e812f2b7d5b9 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -456,7 +456,7 @@ static int xe_bo_trigger_rebind(struct xe_device *xe, struct xe_bo *bo,

 	dma_resv_assert_held(bo->ttm.base.resv);

-	if (!xe_device_in_fault_mode(xe) && !list_empty(&bo->vmas)) {
+	if (!list_empty(&bo->ttm.base.gpuva.list)) {
 		dma_resv_iter_begin(&cursor, bo->ttm.base.resv,
 				    DMA_RESV_USAGE_BOOKKEEP);
 		dma_resv_for_each_fence_unlocked(&cursor, fence)
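[Editor's note] The hunk's context window cuts off inside the iteration.
A minimal sketch of the complete pattern the patch reinstates, assuming
the standard dma-resv iterator API; bo_enable_pipelined_signaling() is a
hypothetical standalone helper, the real code lives inline in
xe_bo_trigger_rebind():

	#include <linux/dma-fence.h>
	#include <linux/dma-resv.h>
	#include <drm/drm_gem.h>

	/* If the object still has GPU VA mappings, force every bookkeeping
	 * fence (e.g. a pending bind fence) to start signaling so that a
	 * pipelined move is not left waiting on lazily-signaled fences. */
	static void bo_enable_pipelined_signaling(struct drm_gem_object *obj)
	{
		struct dma_resv_iter cursor;
		struct dma_fence *fence;

		dma_resv_assert_held(obj->resv);

		if (list_empty(&obj->gpuva.list))
			return;

		dma_resv_iter_begin(&cursor, obj->resv, DMA_RESV_USAGE_BOOKKEEP);
		dma_resv_for_each_fence_unlocked(&cursor, fence)
			dma_fence_enable_sw_signaling(fence);
		dma_resv_iter_end(&cursor);
	}

Note the dropped xe_device_in_fault_mode() check: per the commit
message, signaling is now triggered for faulting VMs too, since bind
fences are waited on either way.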
@@ -1049,7 +1049,7 @@ static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo)
 		drm_prime_gem_destroy(&bo->ttm.base, NULL);
 	drm_gem_object_release(&bo->ttm.base);

-	xe_assert(xe, list_empty(&bo->vmas));
+	xe_assert(xe, list_empty(&ttm_bo->base.gpuva.list));

 	if (bo->ggtt_node.size)
 		xe_ggtt_remove_bo(bo->tile->mem.ggtt, bo);
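[Editor's note] The assertion above encodes the invariant that every VMA
was unmapped before the BO is destroyed, now expressed against the
common gpuva list. Purely illustrative, assuming the drm_gpuva manager
helpers of this era (header and macro names shifted during the later
gpuvm rework, so treat them as assumptions):

	#include <drm/drm_gpuva_mgr.h>

	/* Hypothetical check: does the object still carry mappings? At
	 * xe_ttm_bo_destroy() time this must be false, which is exactly
	 * what the xe_assert() above verifies via list_empty(). */
	static bool bo_has_mappings(struct drm_gem_object *obj)
	{
		struct drm_gpuva *va;

		drm_gem_for_each_gpuva(va, obj)
			return true;	/* found at least one mapping */

		return false;
	}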
@@ -1232,7 +1232,6 @@ struct xe_bo *__xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
 	bo->props.preferred_gt = XE_BO_PROPS_INVALID;
 	bo->props.preferred_mem_type = XE_BO_PROPS_INVALID;
 	bo->ttm.priority = DRM_XE_VMA_PRIORITY_NORMAL;
-	INIT_LIST_HEAD(&bo->vmas);
 	INIT_LIST_HEAD(&bo->pinned_link);

 	drm_gem_private_object_init(&xe->drm, &bo->ttm.base, size);
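[Editor's note] No replacement for the removed INIT_LIST_HEAD() is
needed here: once drm_gem_private_object_init() runs, the GEM core
initializes the gpuva list itself for drivers with GPUVA support. A
sketch of that initializer as it appears in drm_gem.h of this era
(exact placement and feature gating in the GEM core are assumptions):

	/* Called by GEM core during object init; drivers therefore no
	 * longer initialize a private per-BO VMA list. */
	static inline void drm_gem_gpuva_init(struct drm_gem_object *obj)
	{
		INIT_LIST_HEAD(&obj->gpuva.list);
	}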