author     Tvrtko Ursulin <tvrtko.ursulin@intel.com>  2022-10-03 19:04:02 +0300
committer  Tvrtko Ursulin <tvrtko.ursulin@intel.com>  2022-10-03 19:04:02 +0300
commit     97acb6a8fcc4e5c2cdc2693a35acdc5a7461aaa3
tree       c4f1a18b38d655b7806a72515992bd9aae14ef53
parent     6fa964c045a6bc3321a9186e87bfbcfd1059b0f1
parent     7860d720a84c74b2761c6b7995392a798ab0a3cb
Merge drm/drm-next into drm-intel-gt-next
Daniele needs 84d4333c1e28 ("misc/mei: Add NULL check to component match callback functions") in order to merge the DG2 HuC patches.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
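For context, the commit referenced above, 84d4333c1e28, hardens the mei component match callbacks against a NULL device. A minimal sketch of that kind of guard is below; the function name and matching logic are hypothetical illustrations, not the actual mei patch:

#include <linux/device.h>
#include <linux/string.h>

/* Illustrative component match callback with the NULL guard up front. */
static int example_component_match(struct device *dev, int subcomponent,
				   void *data)
{
	/* A NULL device, or one with no bound driver, cannot match. */
	if (!dev || !dev->driver)
		return 0;

	return !strcmp(dev->driver->name, "i915");
}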
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c  44  ++++++++++++++++++++++++++++++++++++++++----
1 file changed, 40 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b37daf9d4bd0..55d605c0c55d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1039,7 +1039,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
if (i915_gem_object_has_pages(obj) &&
i915_gem_object_is_tiled(obj) &&
- i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+ i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) {
if (obj->mm.madv == I915_MADV_WILLNEED) {
GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
i915_gem_object_clear_tiling_quirk(obj);
@@ -1089,6 +1089,43 @@ out:
return err;
}
+/*
+ * A single pass should suffice to release all the freed objects (along most
+ * call paths), but be a little more paranoid in that freeing the objects does
+ * take a small amount of time, during which the RCU callbacks could have
+ * added new objects onto the freed list and armed the work again.
+ */
+void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
+{
+ while (atomic_read(&i915->mm.free_count)) {
+ flush_work(&i915->mm.free_work);
+ flush_delayed_work(&i915->bdev.wq);
+ rcu_barrier();
+ }
+}
+
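The loop above drains a producer that looks roughly like the sketch below. This is a simplified illustration of the pattern with hypothetical names, not the exact i915 code: the RCU free path pushes the object onto a lockless list and (re)arms the worker, which is why free_count can become non-zero again mid-drain.

#include <linux/atomic.h>
#include <linux/container_of.h>
#include <linux/llist.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

/* Hypothetical, simplified stand-ins for the i915 structures. */
struct example_private {
	struct {
		struct llist_head free_list;
		struct work_struct free_work;
		atomic_t free_count;	/* read by the drain loop above */
	} mm;
};

struct example_object {
	struct example_private *i915;
	struct llist_node freed;
	struct rcu_head rcu;
};

static void example_free_object_rcu(struct rcu_head *head)
{
	struct example_object *obj =
		container_of(head, struct example_object, rcu);
	struct example_private *i915 = obj->i915;

	/* Arm the worker when the list transitions from empty. */
	if (llist_add(&obj->freed, &i915->mm.free_list))
		queue_work(system_wq, &i915->mm.free_work);
}

static void example_free_object(struct example_object *obj)
{
	/* Make the pending free visible to the drain loop. */
	atomic_inc(&obj->i915->mm.free_count);
	call_rcu(&obj->rcu, example_free_object_rcu);
}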
+/*
+ * Similar to the freed objects above (see i915_gem_drain_freed_objects()), in
+ * general we have workers that are armed by RCU and then rearm themselves in
+ * their callbacks. To be paranoid, we need to drain the workqueue a second
+ * time after waiting for the RCU grace period so that we catch work queued
+ * via RCU from the first pass. As neither drain_workqueue() nor
+ * flush_workqueue() report a result, we assume that no more than 3 passes
+ * are required to catch all _recursive_ RCU delayed work.
+ */
+void i915_gem_drain_workqueue(struct drm_i915_private *i915)
+{
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ flush_workqueue(i915->wq);
+ rcu_barrier();
+ i915_gem_drain_freed_objects(i915);
+ }
+
+ drain_workqueue(i915->wq);
+}
+
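The kind of self-rearming worker that motivates the three passes might look like the sketch below (hypothetical names, simplified): a flush can be followed by an RCU callback queuing the work again, so a single flush_workqueue()/rcu_barrier() pair is not enough.

#include <linux/container_of.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

struct example_worker {
	struct workqueue_struct *wq;
	struct work_struct work;
	struct rcu_head rcu;
	bool more_to_do;
};

static void example_rearm_rcu(struct rcu_head *head)
{
	struct example_worker *w =
		container_of(head, struct example_worker, rcu);

	/* Re-arm after a grace period: work flushed earlier is queued anew. */
	queue_work(w->wq, &w->work);
}

static void example_work_fn(struct work_struct *work)
{
	struct example_worker *w =
		container_of(work, struct example_worker, work);

	/* Defer the next round past an RCU grace period. */
	if (w->more_to_do)
		call_rcu(&w->rcu, example_rearm_rcu);
}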
int i915_gem_init(struct drm_i915_private *dev_priv)
{
struct intel_gt *gt;
@@ -1097,8 +1134,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
/* We need to fallback to 4K pages if host doesn't support huge gtt. */
if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
- mkwrite_device_info(dev_priv)->page_sizes =
- I915_GTT_PAGE_SIZE_4K;
+ RUNTIME_INFO(dev_priv)->page_sizes = I915_GTT_PAGE_SIZE_4K;
ret = i915_gem_init_userptr(dev_priv);
if (ret)
@@ -1235,7 +1271,7 @@ void i915_gem_init_early(struct drm_i915_private *dev_priv)
i915_gem_init__mm(dev_priv);
i915_gem_init__contexts(dev_priv);
- spin_lock_init(&dev_priv->fb_tracking.lock);
+ spin_lock_init(&dev_priv->display.fb_tracking.lock);
}
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)