author    Matt Roper <matthew.d.roper@intel.com>   2023-06-02 00:52:25 +0300
committer Rodrigo Vivi <rodrigo.vivi@intel.com>    2023-12-20 02:34:14 +0300
commit    876611c2b75689c6bea43bdbbbef9b358f71526a
tree      9a5ac2da4dd0a51b434b21713c38796c25631f74
parent    ebd288cba7db7097ad50a4736ded94cb0d92fadf
drm/xe: Memory allocations are tile-based, not GT-based
Since memory and address spaces are a tile concept rather than a GT
concept, we need to plumb tile-based handling through lots of
memory-related code.

Note one remaining shortcoming that must be addressed before media GT
support can be re-enabled: although the address space is shared between
a tile's GTs, each GT caches PTEs independently in its own TLB, so TLB
invalidation still needs to be handled at the GT level.

v2:
 - Fix kunit test build.

Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
Link: https://lore.kernel.org/r/20230601215244.678611-13-matthew.d.roper@intel.com
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
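To illustrate the caveat above, here is a minimal, compilable sketch of
why invalidation has to fan out per GT even though PTE state is per
tile. All names here (tile_sketch, gt_count, invalidate_gt_tlb(), ...)
are hypothetical stand-ins, not the real xe driver interfaces:

#include <stdio.h>

#define MAX_GTS_PER_TILE 2

struct gt_sketch { int id; };

struct tile_sketch {
	int id;
	int gt_count;
	struct gt_sketch gts[MAX_GTS_PER_TILE]; /* e.g. render GT + media GT */
};

/* Hypothetical per-GT TLB shootdown; each GT caches PTEs independently. */
static void invalidate_gt_tlb(struct gt_sketch *gt)
{
	printf("invalidating TLB on GT %d\n", gt->id);
}

/* Even though the page tables are shared tile-wide, a tile-level
 * invalidation must still visit every GT on the tile. */
static void invalidate_tile_tlbs(struct tile_sketch *tile)
{
	for (int i = 0; i < tile->gt_count; i++)
		invalidate_gt_tlb(&tile->gts[i]);
}

int main(void)
{
	struct tile_sketch tile = {
		.id = 0, .gt_count = 2,
		.gts = { { .id = 0 }, { .id = 1 } },
	};
	invalidate_tile_tlbs(&tile);
	return 0;
}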
Diffstat (limited to 'drivers/gpu/drm/xe/xe_gt_pagefault.c')
 drivers/gpu/drm/xe/xe_gt_pagefault.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)
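The hunks below convert the VMA validity tracking from per-GT to
per-tile bitmasks. A standalone sketch of that bookkeeping, using
simplified stand-in structs rather than the real struct xe_tile /
struct xe_vma layouts:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

struct xe_tile_sketch {
	uint8_t id;                /* tile index within the device */
};

struct xe_vma_sketch {
	uint8_t tile_present;      /* bitmask: tiles whose page tables map this VMA */
	uint8_t tile_invalidated;  /* bitmask: tiles whose mapping was invalidated */
};

/* Mirrors the reworked vma_is_valid() in the diff below: the VMA is usable
 * on a tile only if its PTEs are present there and not since invalidated. */
static bool vma_is_valid(const struct xe_tile_sketch *tile,
			 const struct xe_vma_sketch *vma)
{
	return (BIT(tile->id) & vma->tile_present) &&
	       !(BIT(tile->id) & vma->tile_invalidated);
}

int main(void)
{
	struct xe_tile_sketch tile0 = { .id = 0 }, tile1 = { .id = 1 };
	struct xe_vma_sketch vma = { .tile_present = BIT(0) | BIT(1),
				     .tile_invalidated = BIT(1) };

	printf("tile0 valid: %d\n", vma_is_valid(&tile0, &vma)); /* 1 */
	printf("tile1 valid: %d\n", vma_is_valid(&tile1, &vma)); /* 0 */

	/* After servicing a fault on tile1, the handler clears its
	 * invalidated bit, as the last hunk below does with tile->id. */
	vma.tile_invalidated &= ~BIT(tile1.id);
	printf("tile1 valid after rebind: %d\n", vma_is_valid(&tile1, &vma)); /* 1 */
	return 0;
}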
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index f4f3d95ae6b1..1ec140aaf2a7 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -69,10 +69,10 @@ static bool access_is_atomic(enum access_type access_type)
 	return access_type == ACCESS_TYPE_ATOMIC;
 }
 
-static bool vma_is_valid(struct xe_gt *gt, struct xe_vma *vma)
+static bool vma_is_valid(struct xe_tile *tile, struct xe_vma *vma)
 {
-	return BIT(gt->info.id) & vma->gt_present &&
-		!(BIT(gt->info.id) & vma->usm.gt_invalidated);
+	return BIT(tile->id) & vma->tile_present &&
+		!(BIT(tile->id) & vma->usm.tile_invalidated);
 }
 
 static bool vma_matches(struct xe_vma *vma, struct xe_vma *lookup)
@@ -152,7 +152,7 @@ retry_userptr:
 	atomic = access_is_atomic(pf->access_type);
 
 	/* Check if VMA is valid */
-	if (vma_is_valid(gt, vma) && !atomic)
+	if (vma_is_valid(tile, vma) && !atomic)
 		goto unlock_vm;
 
 	/* TODO: Validate fault */
@@ -208,8 +208,8 @@ retry_userptr:
 
 	/* Bind VMA only to the GT that has faulted */
 	trace_xe_vma_pf_bind(vma);
-	fence = __xe_pt_bind_vma(gt, vma, xe_gt_migrate_engine(gt), NULL, 0,
-				 vma->gt_present & BIT(gt->info.id));
+	fence = __xe_pt_bind_vma(tile, vma, xe_gt_migrate_engine(gt), NULL, 0,
+				 vma->tile_present & BIT(tile->id));
 	if (IS_ERR(fence)) {
 		ret = PTR_ERR(fence);
 		goto unlock_dma_resv;
@@ -225,7 +225,7 @@ retry_userptr:
 	if (xe_vma_is_userptr(vma))
 		ret = xe_vma_userptr_check_repin(vma);
 
-	vma->usm.gt_invalidated &= ~BIT(gt->info.id);
+	vma->usm.tile_invalidated &= ~BIT(tile->id);
 
 unlock_dma_resv:
 	if (only_needs_bo_lock(bo))
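One detail worth noting in the bind hunk above: the page-table update is
now keyed by tile (__xe_pt_bind_vma(tile, ...)), but the migrate engine
that submits it still comes from the faulting GT
(xe_gt_migrate_engine(gt)). A small hedged sketch of that split, with
stand-in types rather than the real driver structs:

#include <stdio.h>

struct gt_sketch { int id; };    /* owns a migrate engine */
struct tile_sketch { int id; };  /* owns the page tables  */

/* Stand-in for __xe_pt_bind_vma(tile, ..., xe_gt_migrate_engine(gt), ...):
 * PTE state is addressed per tile, execution happens on a GT engine. */
static void bind_vma_sketch(const struct tile_sketch *tile,
			    const struct gt_sketch *gt)
{
	printf("binding on tile %d via GT %d's migrate engine\n",
	       tile->id, gt->id);
}

int main(void)
{
	struct tile_sketch tile0 = { .id = 0 };
	struct gt_sketch gt0 = { .id = 0 };  /* the GT that faulted */

	bind_vma_sketch(&tile0, &gt0);
	return 0;
}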