Diffstat (limited to 'drivers/gpu/drm/i915/gem')
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c    61
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_mman.c            2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.c          3
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object_types.h   10
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c         18
5 files changed, 76 insertions(+), 18 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 29e9e8d5b6fe..f266b68cf012 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -730,32 +730,69 @@ static int eb_reserve(struct i915_execbuffer *eb)
bool unpinned;
/*
- * Attempt to pin all of the buffers into the GTT.
- * This is done in 2 phases:
+ * We have one or more buffers that we couldn't bind, which could be due to
+ * various reasons. To resolve this we have 4 passes, with every next
+ * level turning the screws tighter:
*
- * 1. Unbind all objects that do not match the GTT constraints for
- * the execbuffer (fenceable, mappable, alignment etc).
- * 2. Bind new objects.
+ * 0. Unbind all objects that do not match the GTT constraints for the
+ * execbuffer (fenceable, mappable, alignment etc). Bind all new
+ * objects. This avoids unnecessary unbinding of later objects in order
+ * to make room for the earlier objects *unless* we need to defragment.
*
- * This avoid unnecessary unbinding of later objects in order to make
- * room for the earlier objects *unless* we need to defragment.
+ * 1. Reorder the buffers, where objects with the most restrictive
+ * placement requirements go first (ignoring fixed location buffers for
+ * now). For example, objects needing the mappable aperture (the first
+ * 256M of GTT), should go first vs objects that can be placed just
+ * about anywhere. Repeat the previous pass.
*
- * Defragmenting is skipped if all objects are pinned at a fixed location.
+ * 2. Consider buffers that are pinned at a fixed location. Also try to
+ * evict the entire VM this time, leaving only objects that we were
+ * unable to lock. Try again to bind the buffers. (still using the new
+ * buffer order).
+ *
+ * 3. We likely have object lock contention for one or more stubborn
+ * objects in the VM, for which we need to evict to make forward
+ * progress (perhaps we are fighting the shrinker?). When evicting the
+ * VM this time around, anything that we can't lock we now track using
+ * the busy_bo, using the full lock (after dropping the vm->mutex to
+ * prevent deadlocks), instead of trylock. We then continue to evict the
+ * VM, this time with the stubborn object locked, which we can now
+ * hopefully unbind (if still bound in the VM). Repeat until the VM is
+ * evicted. Finally we should be able to bind everything.
*/
- for (pass = 0; pass <= 2; pass++) {
+ for (pass = 0; pass <= 3; pass++) {
int pin_flags = PIN_USER | PIN_VALIDATE;
if (pass == 0)
pin_flags |= PIN_NONBLOCK;
if (pass >= 1)
- unpinned = eb_unbind(eb, pass == 2);
+ unpinned = eb_unbind(eb, pass >= 2);
if (pass == 2) {
err = mutex_lock_interruptible(&eb->context->vm->mutex);
if (!err) {
- err = i915_gem_evict_vm(eb->context->vm, &eb->ww);
+ err = i915_gem_evict_vm(eb->context->vm, &eb->ww, NULL);
+ mutex_unlock(&eb->context->vm->mutex);
+ }
+ if (err)
+ return err;
+ }
+
+ if (pass == 3) {
+retry:
+ err = mutex_lock_interruptible(&eb->context->vm->mutex);
+ if (!err) {
+ struct drm_i915_gem_object *busy_bo = NULL;
+
+ err = i915_gem_evict_vm(eb->context->vm, &eb->ww, &busy_bo);
mutex_unlock(&eb->context->vm->mutex);
+ if (err && busy_bo) {
+ err = i915_gem_object_lock(busy_bo, &eb->ww);
+ i915_gem_object_put(busy_bo);
+ if (!err)
+ goto retry;
+ }
}
if (err)
return err;
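
For readers skimming the diff, the locking dance that pass 3 introduces can be condensed as below. This is an illustrative sketch only, not a helper added by the patch (the wrapper name evict_vm_retry_busy is invented); it shows how the new busy_bo out-parameter of i915_gem_evict_vm() is consumed: a contended object that could not be trylocked under vm->mutex is handed back with a reference, locked with the full ww lock once the mutex is dropped, and the eviction is retried.

/*
 * Illustrative sketch (hypothetical wrapper, mirroring the pass == 3
 * branch above): evict the VM, and if a contended object is returned
 * via busy_bo, take its full ww lock outside vm->mutex and retry.
 */
static int evict_vm_retry_busy(struct i915_execbuffer *eb)
{
	struct i915_address_space *vm = eb->context->vm;
	int err;

retry:
	err = mutex_lock_interruptible(&vm->mutex);
	if (!err) {
		struct drm_i915_gem_object *busy_bo = NULL;

		err = i915_gem_evict_vm(vm, &eb->ww, &busy_bo);
		mutex_unlock(&vm->mutex);

		if (err && busy_bo) {
			/* Sleeping ww lock, legal now that vm->mutex is dropped. */
			err = i915_gem_object_lock(busy_bo, &eb->ww);
			i915_gem_object_put(busy_bo); /* drop the reference handed back */
			if (!err)
				goto retry;
		}
	}
	return err;
}
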
@@ -2427,7 +2464,7 @@ gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
/* Check whether the file_priv has already selected one ring. */
if ((int)file_priv->bsd_engine < 0)
file_priv->bsd_engine =
- prandom_u32_max(num_vcs_engines(dev_priv));
+ get_random_u32_below(num_vcs_engines(dev_priv));
return file_priv->bsd_engine;
}
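
The hunk above is unrelated to the eviction rework; it looks like part of the tree-wide conversion away from prandom_u32_max(), whose replacement get_random_u32_below(ceil) likewise returns a value in [0, ceil). A minimal illustration (the helper name is hypothetical):

#include <linux/random.h>

/* Pick an engine index uniformly in [0, num_engines), like the call above. */
static unsigned int pick_vcs_engine(unsigned int num_engines)
{
	return get_random_u32_below(num_engines);
}
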
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index c29efdef8313..0ad44f3868de 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -369,7 +369,7 @@ retry:
if (vma == ERR_PTR(-ENOSPC)) {
ret = mutex_lock_interruptible(&ggtt->vm.mutex);
if (!ret) {
- ret = i915_gem_evict_vm(&ggtt->vm, &ww);
+ ret = i915_gem_evict_vm(&ggtt->vm, &ww, NULL);
mutex_unlock(&ggtt->vm.mutex);
}
if (ret)
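
Both this call site and the one in eb_reserve()'s pass 2 pass NULL for the new third argument, keeping the old behaviour of simply skipping objects that cannot be trylocked; only the execbuffer pass-3 path asks for the contended object back. Judging from these call sites, the updated declaration (presumably in i915_gem_evict.h) should look roughly like this:

/*
 * Evict everything we can lock from @vm. If @busy_bo is non-NULL and an
 * object could not be trylocked, a reference to that object may be
 * returned through @busy_bo so the caller can lock it outside vm->mutex
 * and retry the eviction.
 */
int i915_gem_evict_vm(struct i915_address_space *vm,
		      struct i915_gem_ww_ctx *ww,
		      struct drm_i915_gem_object **busy_bo);
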
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 733696057761..1a0886b8aaa1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -785,6 +785,9 @@ bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj)
if (!HAS_FLAT_CCS(to_i915(obj->base.dev)))
return false;
+ if (obj->flags & I915_BO_ALLOC_CCS_AUX)
+ return true;
+
for (i = 0; i < obj->mm.n_placements; i++) {
/* Compression is not allowed for the objects with smem placement */
if (obj->mm.placements[i]->type == INTEL_MEMORY_SYSTEM)
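
With the new early return in place, i915_gem_object_needs_ccs_pages() reads roughly as follows. The first half is taken from the hunk above; the tail of the placement loop is paraphrased from the surrounding upstream code and should be checked against the tree:

bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj)
{
	bool lmem_placement = false;
	int i;

	if (!HAS_FLAT_CCS(to_i915(obj->base.dev)))
		return false;

	/* New: objects allocated with CCS aux always carry the extra pages. */
	if (obj->flags & I915_BO_ALLOC_CCS_AUX)
		return true;

	for (i = 0; i < obj->mm.n_placements; i++) {
		/* Compression is not allowed for the objects with smem placement */
		if (obj->mm.placements[i]->type == INTEL_MEMORY_SYSTEM)
			return false;
		if (!lmem_placement &&
		    obj->mm.placements[i]->type == INTEL_MEMORY_LOCAL)
			lmem_placement = true;
	}

	return lmem_placement;
}
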
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index a7b70701617a..19c9bdd8f905 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -327,16 +327,18 @@ struct drm_i915_gem_object {
* dealing with userspace objects the CPU fault handler is free to ignore this.
*/
#define I915_BO_ALLOC_GPU_ONLY BIT(6)
+#define I915_BO_ALLOC_CCS_AUX BIT(7)
#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | \
I915_BO_ALLOC_VOLATILE | \
I915_BO_ALLOC_CPU_CLEAR | \
I915_BO_ALLOC_USER | \
I915_BO_ALLOC_PM_VOLATILE | \
I915_BO_ALLOC_PM_EARLY | \
- I915_BO_ALLOC_GPU_ONLY)
-#define I915_BO_READONLY BIT(7)
-#define I915_TILING_QUIRK_BIT 8 /* unknown swizzling; do not release! */
-#define I915_BO_PROTECTED BIT(9)
+ I915_BO_ALLOC_GPU_ONLY | \
+ I915_BO_ALLOC_CCS_AUX)
+#define I915_BO_READONLY BIT(8)
+#define I915_TILING_QUIRK_BIT 9 /* unknown swizzling; do not release! */
+#define I915_BO_PROTECTED BIT(10)
/**
* @mem_flags - Mutable placement-related flags
*
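
A caller opts into the new flag at allocation time, as the i915_gem_ttm_pm.c hunk below does. Purely for illustration (the region lookup and size are placeholders), allocating a system-memory object whose CCS aux state must be handled explicitly looks like:

/* Illustrative allocation with the new flag; error handling abridged. */
backup = i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM],
					size, 0, I915_BO_ALLOC_CCS_AUX);
if (IS_ERR(backup))
	return PTR_ERR(backup);

Note also that inserting I915_BO_ALLOC_CCS_AUX at BIT(7) shifts I915_BO_READONLY, I915_TILING_QUIRK_BIT and I915_BO_PROTECTED up by one bit each, so any code that hard-codes those bit positions instead of using the macros needs the same bump.
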
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
index 07e49f22f2de..7e67742bc65e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
@@ -50,6 +50,7 @@ static int i915_ttm_backup(struct i915_gem_apply_to_region *apply,
container_of(bo->bdev, typeof(*i915), bdev);
struct drm_i915_gem_object *backup;
struct ttm_operation_ctx ctx = {};
+ unsigned int flags;
int err = 0;
if (bo->resource->mem_type == I915_PL_SYSTEM || obj->ttm.backup)
@@ -65,7 +66,22 @@ static int i915_ttm_backup(struct i915_gem_apply_to_region *apply,
if (obj->flags & I915_BO_ALLOC_PM_VOLATILE)
return 0;
- backup = i915_gem_object_create_shmem(i915, obj->base.size);
+ /*
+ * It seems that we might have some framebuffers still pinned at this
+ * stage, but for such objects we might also need to deal with the CCS
+ * aux state. Make sure we force the save/restore of the CCS state,
+ * otherwise we might observe display corruption, when returning from
+ * suspend.
+ */
+ flags = 0;
+ if (i915_gem_object_needs_ccs_pages(obj)) {
+ WARN_ON_ONCE(!i915_gem_object_is_framebuffer(obj));
+ WARN_ON_ONCE(!pm_apply->allow_gpu);
+
+ flags = I915_BO_ALLOC_CCS_AUX;
+ }
+ backup = i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM],
+ obj->base.size, 0, flags);
if (IS_ERR(backup))
return PTR_ERR(backup);
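
The allocator switch in this final hunk follows from the comment above: i915_gem_object_create_shmem() takes no allocation flags, so the backup object is now created through the generic region interface on SMEM, which does, letting the CCS aux flag be forced for pinned framebuffers. Side by side (old line as removed, new line as merged):

/* Old path: shmem helper, no way to request CCS aux handling. */
backup = i915_gem_object_create_shmem(i915, obj->base.size);

/* New path: SMEM region allocator, which accepts allocation flags. */
backup = i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM],
					obj->base.size, 0, flags);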