author     Matthew Brost <matthew.brost@intel.com>    2023-07-08 08:23:57 +0300
committer  Rodrigo Vivi <rodrigo.vivi@intel.com>      2023-12-21 19:35:18 +0300
commit     b06d47be7c83165d3b3e45e1d5f9520b79c7f5cc (patch)
tree       b27852e1de1904c7dc2b689f4594e63ea3c6f685 /drivers/gpu/drm/xe/xe_vm.h
parent     5cecdd0bb6bf4b8979b7d071017560daecfc9200 (diff)
download   linux-b06d47be7c83165d3b3e45e1d5f9520b79c7f5cc.tar.xz
drm/xe: Port Xe to GPUVA
Rather than open coding VM binds and VMA tracking, use the GPUVA library. GPUVA provides a common infrastructure for VM binds to use mmap / munmap semantics and support for VK sparse bindings.

The concepts are:
1) xe_vm inherits from drm_gpuva_manager
2) xe_vma inherits from drm_gpuva
3) xe_vma_op inherits from drm_gpuva_op
4) VM bind operations (MAP, UNMAP, PREFETCH, UNMAP_ALL) call into the GPUVA code to generate a VMA operations list which is parsed, committed, and executed.

v2 (CI): Add break after default in case statement.
v3: Rebase
v4: Fix some error handling
v5: Use unlocked version VMA in error paths
v6: Rebase, address some review feedback mainly Thomas H
v7: Fix compile error in xe_vma_op_unwind, address checkpatch

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
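For orientation, a minimal sketch of the inheritance-by-embedding pattern the commit message describes (the real layouts live in drivers/gpu/drm/xe/xe_vm_types.h; the drm_gpuvm naming follows the diff below, and the header name has varied across kernel versions):

	#include <drm/drm_gpuvm.h>	/* drm_gpuva_mgr.h on older kernels */

	struct xe_vm {
		struct drm_gpuvm gpuvm;	/* "base class": VA space state, resv */
		/* ... Xe-specific members elided ... */
	};

	struct xe_vma {
		struct drm_gpuva gpuva;	/* "base class": va.addr/va.range + GEM binding */
		/* ... Xe-specific members elided ... */
	};

	/* Downcasts recover the driver object with container_of(), exactly as
	 * the gpuva_to_vm()/gpuva_to_vma() helpers added in this diff do. */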
Diffstat (limited to 'drivers/gpu/drm/xe/xe_vm.h')
-rw-r--r--  drivers/gpu/drm/xe/xe_vm.h | 59
1 file changed, 45 insertions(+), 14 deletions(-)
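To make concept 4) above concrete, here is a hedged sketch of how a bind path might consume a GPUVA-generated operations list. example_vm_bind() is hypothetical; drm_gpuvm_sm_map_ops_create(), drm_gpuva_for_each_op() and drm_gpuva_ops_free() are drm_gpuvm API calls whose exact names and signatures vary by kernel version:

	#include <linux/err.h>
	#include <linux/types.h>
	#include <drm/drm_gpuvm.h>

	static int example_vm_bind(struct xe_vm *vm, u64 addr, u64 range,
				   struct drm_gem_object *obj, u64 offset)
	{
		struct drm_gpuva_ops *ops;
		struct drm_gpuva_op *op;

		/* Ask GPUVA to split/merge existing mappings into an op list. */
		ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
						  obj, offset);
		if (IS_ERR(ops))
			return PTR_ERR(ops);

		/* Parse, commit and execute each generated operation. */
		drm_gpuva_for_each_op(op, ops) {
			switch (op->op) {
			case DRM_GPUVA_OP_MAP:
				/* create and commit a new xe_vma */
				break;
			case DRM_GPUVA_OP_REMAP:
				/* tear down op->remap.unmap, remap prev/next */
				break;
			case DRM_GPUVA_OP_UNMAP:
				/* tear down gpuva_to_vma(op->unmap.va) */
				break;
			case DRM_GPUVA_OP_PREFETCH:
				/* migrate backing store of op->prefetch.va */
				break;
			default:
				break;	/* v2 (CI): break after default */
			}
		}

		drm_gpuva_ops_free(&vm->gpuvm, ops);
		return 0;
	}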
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index 644a8aa604e8..d386e72cb974 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -6,6 +6,7 @@
#ifndef _XE_VM_H_
#define _XE_VM_H_
+#include "xe_bo_types.h"
#include "xe_macros.h"
#include "xe_map.h"
#include "xe_vm_types.h"
@@ -22,20 +23,19 @@ struct xe_file;
struct xe_sync_entry;
struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags);
-void xe_vm_free(struct kref *ref);
struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id);
int xe_vma_cmp_vma_cb(const void *key, const struct rb_node *node);
static inline struct xe_vm *xe_vm_get(struct xe_vm *vm)
{
- kref_get(&vm->refcount);
+ drm_gpuvm_get(&vm->gpuvm);
return vm;
}
static inline void xe_vm_put(struct xe_vm *vm)
{
- kref_put(&vm->refcount, xe_vm_free);
+ drm_gpuvm_put(&vm->gpuvm);
}
int xe_vm_lock(struct xe_vm *vm, struct ww_acquire_ctx *ww,
@@ -61,7 +61,22 @@ static inline bool xe_vm_is_closed_or_banned(struct xe_vm *vm)
}
struct xe_vma *
-xe_vm_find_overlapping_vma(struct xe_vm *vm, struct xe_vma *vma);
+xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range);
+
+static inline struct xe_vm *gpuva_to_vm(struct drm_gpuva *gpuva)
+{
+ return container_of(gpuva->vm, struct xe_vm, gpuvm);
+}
+
+static inline struct xe_vma *gpuva_to_vma(struct drm_gpuva *gpuva)
+{
+ return container_of(gpuva, struct xe_vma, gpuva);
+}
+
+static inline struct xe_vma_op *gpuva_op_to_vma_op(struct drm_gpuva_op *op)
+{
+ return container_of(op, struct xe_vma_op, base);
+}
/**
* DOC: Provide accessors for vma members to facilitate easy change of
@@ -69,12 +84,12 @@ xe_vm_find_overlapping_vma(struct xe_vm *vm, struct xe_vma *vma);
*/
static inline u64 xe_vma_start(struct xe_vma *vma)
{
- return vma->start;
+ return vma->gpuva.va.addr;
}
static inline u64 xe_vma_size(struct xe_vma *vma)
{
- return vma->end - vma->start + 1;
+ return vma->gpuva.va.range;
}
static inline u64 xe_vma_end(struct xe_vma *vma)
@@ -84,32 +99,33 @@ static inline u64 xe_vma_end(struct xe_vma *vma)
static inline u64 xe_vma_bo_offset(struct xe_vma *vma)
{
- return vma->bo_offset;
+ return vma->gpuva.gem.offset;
}
static inline struct xe_bo *xe_vma_bo(struct xe_vma *vma)
{
- return vma->bo;
+ return !vma->gpuva.gem.obj ? NULL :
+ container_of(vma->gpuva.gem.obj, struct xe_bo, ttm.base);
}
static inline struct xe_vm *xe_vma_vm(struct xe_vma *vma)
{
- return vma->vm;
+ return container_of(vma->gpuva.vm, struct xe_vm, gpuvm);
}
static inline bool xe_vma_read_only(struct xe_vma *vma)
{
- return vma->pte_flags & XE_PTE_FLAG_READ_ONLY;
+ return vma->gpuva.flags & XE_VMA_READ_ONLY;
}
static inline u64 xe_vma_userptr(struct xe_vma *vma)
{
- return vma->userptr.ptr;
+ return vma->gpuva.gem.offset;
}
static inline bool xe_vma_is_null(struct xe_vma *vma)
{
- return vma->pte_flags & XE_PTE_FLAG_NULL;
+ return vma->gpuva.flags & DRM_GPUVA_SPARSE;
}
static inline bool xe_vma_has_no_bo(struct xe_vma *vma)
@@ -122,8 +138,6 @@ static inline bool xe_vma_is_userptr(struct xe_vma *vma)
return xe_vma_has_no_bo(vma) && !xe_vma_is_null(vma);
}
-#define xe_vm_assert_held(vm) dma_resv_assert_held(&(vm)->resv)
-
u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile);
int xe_vm_create_ioctl(struct drm_device *dev, void *data,
@@ -218,6 +232,23 @@ void xe_vm_fence_all_extobjs(struct xe_vm *vm, struct dma_fence *fence,
int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id);
+/**
+ * xe_vm_resv() - Returns the vm's reservation object
+ * @vm: The vm
+ *
+ * Return: Pointer to the vm's reservation object.
+ */
+static inline struct dma_resv *xe_vm_resv(struct xe_vm *vm)
+{
+ return drm_gpuvm_resv(&vm->gpuvm);
+}
+
+/**
+ * xe_vm_assert_held(vm) - Assert that the vm's reservation object is held.
+ * @vm: The vm
+ */
+#define xe_vm_assert_held(vm) dma_resv_assert_held(xe_vm_resv(vm))
+
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
#define vm_dbg drm_dbg
#else