author     Matthew Auld <matthew.auld@intel.com>    2023-07-19 11:38:12 +0300
committer  Rodrigo Vivi <rodrigo.vivi@intel.com>    2023-12-21 19:37:36 +0300
commit     e3d2309250d49e4558b0abe95924b18f74995607 (patch)
tree       6e21f3a5aca6240848404fbcfc2fb5779a3f5568 /drivers/gpu/drm/xe/xe_device.c
parent     7d623575a34539c0302a3ed3ec7321efcb281e37 (diff)
download   linux-e3d2309250d49e4558b0abe95924b18f74995607.tar.xz
drm/xe: add lockdep annotation for xe_device_mem_access_get()
The atomics here might hide potential issues. Also, the rpm core is not
holding any lock when calling our rpm resume callback, so add a dummy
lock with the idea that xe_pm_runtime_resume() is eventually going to
be called while we are holding it. This only needs to happen once, and
then lockdep can validate all callers and their locks.

v2: (Thomas Hellström)
 - Prefer static lockdep_map instead of full blown mutex.

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Acked-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
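For readers unfamiliar with the pattern, here is a minimal sketch of the
dummy-lock idea described above (my_dummy_map, my_get() and my_resume_path()
are illustrative names, not part of the driver): a static lockdep_map that
protects nothing is acquired around the path whose callback may take other
locks, so lockdep records the callers' locks as held before the map and the
callback's locks as nesting inside it.

	#include <linux/lockdep.h>

	#ifdef CONFIG_LOCKDEP
	/* Dummy map: protects nothing, exists only so lockdep can track ordering. */
	static struct lockdep_map my_dummy_map = {
		.name = "my_dummy_map"
	};
	#endif

	static void my_resume_path(void)
	{
		/*
		 * Any lock taken in here is recorded by lockdep as nesting
		 * inside my_dummy_map.
		 */
	}

	static void my_get(void)
	{
		/* Callers' locks are recorded as being held before my_dummy_map. */
		lock_map_acquire(&my_dummy_map);
		my_resume_path();
		lock_map_release(&my_dummy_map);
	}

Once a single my_get() call has been seen with the callback path, lockdep can
compare every caller's held locks against everything the callback locks,
without the two paths ever having to race at run time.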
Diffstat (limited to 'drivers/gpu/drm/xe/xe_device.c')
-rw-r--r--  drivers/gpu/drm/xe/xe_device.c  24
1 file changed, 24 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 47401bb49958..b1f36c986f0d 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -34,6 +34,12 @@
#include "xe_vm_madvise.h"
#include "xe_wait_user_fence.h"
+#ifdef CONFIG_LOCKDEP
+static struct lockdep_map xe_device_mem_access_lockdep_map = {
+ .name = "xe_device_mem_access_lockdep_map"
+};
+#endif
+
static int xe_file_open(struct drm_device *dev, struct drm_file *file)
{
struct xe_file *xef;
@@ -410,10 +416,28 @@ void xe_device_mem_access_get(struct xe_device *xe)
if (xe_pm_read_callback_task(xe) == current)
return;
+ /*
+ * Since the resume here is synchronous it can be quite easy to deadlock
+ * if we are not careful. Also in practice it might be quite timing
+ * sensitive to ever see the 0 -> 1 transition with the callers locks
+ * held, so deadlocks might exist but are hard for lockdep to ever see.
+ * With this in mind, help lockdep learn about the potentially scary
+ * stuff that can happen inside the runtime_resume callback by acquiring
+ * a dummy lock (it doesn't protect anything and gets compiled out on
+ * non-debug builds). Lockdep then only needs to see the
+ * mem_access_lockdep_map -> runtime_resume callback once, and then can
+ * hopefully validate all the (callers_locks) -> mem_access_lockdep_map.
+ * For example if the (callers_locks) are ever grabbed in the
+ * runtime_resume callback, lockdep should give us a nice splat.
+ */
+ lock_map_acquire(&xe_device_mem_access_lockdep_map);
+
xe_pm_runtime_get(xe);
ref = atomic_inc_return(&xe->mem_access.ref);
XE_WARN_ON(ref == S32_MAX);
+
+ lock_map_release(&xe_device_mem_access_lockdep_map);
}
void xe_device_mem_access_put(struct xe_device *xe)
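As a usage note, here is a hypothetical inversion this annotation is meant to
catch (some_lock and the two functions are illustrative, not from the driver):
a caller holding some_lock across xe_device_mem_access_get() establishes
some_lock -> xe_device_mem_access_lockdep_map, while a runtime_resume path
that takes some_lock establishes the reverse ordering, so lockdep reports a
possible circular locking dependency even if the deadlock never actually
triggers during the test run.

	#include <linux/mutex.h>

	static DEFINE_MUTEX(some_lock);	/* illustrative only */

	static void caller_path(struct xe_device *xe)
	{
		mutex_lock(&some_lock);
		/* Records some_lock -> xe_device_mem_access_lockdep_map. */
		xe_device_mem_access_get(xe);
		xe_device_mem_access_put(xe);
		mutex_unlock(&some_lock);
	}

	static void called_from_runtime_resume(void)
	{
		/*
		 * Runs while the dummy map is held, so this records
		 * xe_device_mem_access_lockdep_map -> some_lock and
		 * completes the cycle lockdep warns about.
		 */
		mutex_lock(&some_lock);
		mutex_unlock(&some_lock);
	}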