Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c |  73
1 file changed, 37 insertions, 36 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f56af0aaafde..262020f8b38d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2797,6 +2797,8 @@ static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
struct intel_engine_cs *ring)
{
+ struct intel_ringbuffer *buffer;
+
while (!list_empty(&ring->active_list)) {
struct drm_i915_gem_object *obj;
@@ -2812,18 +2814,23 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
* are the ones that keep the context and ringbuffer backing objects
* pinned in place.
*/
-	while (!list_empty(&ring->execlist_queue)) {
-		struct drm_i915_gem_request *submit_req;
-		submit_req = list_first_entry(&ring->execlist_queue,
-				struct drm_i915_gem_request,
-				execlist_link);
-		list_del(&submit_req->execlist_link);
+	if (i915.enable_execlists) {
+		spin_lock_irq(&ring->execlist_lock);
+		while (!list_empty(&ring->execlist_queue)) {
+			struct drm_i915_gem_request *submit_req;
+
+			submit_req = list_first_entry(&ring->execlist_queue,
+					struct drm_i915_gem_request,
+					execlist_link);
+			list_del(&submit_req->execlist_link);

-		if (submit_req->ctx != ring->default_context)
-			intel_lr_context_unpin(submit_req);
+			if (submit_req->ctx != ring->default_context)
+				intel_lr_context_unpin(submit_req);

-		i915_gem_request_unreference(submit_req);
+			i915_gem_request_unreference(submit_req);
+		}
+		spin_unlock_irq(&ring->execlist_lock);
	}
/*
@@ -2842,6 +2849,18 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
i915_gem_request_retire(request);
}
+
+ /* Having flushed all requests from all queues, we know that all
+ * ringbuffers must now be empty. However, since we do not reclaim
+ * all space when retiring the request (to prevent HEADs colliding
+ * with rapid ringbuffer wraparound) the amount of available space
+ * upon reset is less than when we start. Do one more pass over
+ * all the ringbuffers to reset last_retired_head.
+ */
+ list_for_each_entry(buffer, &ring->buffers, link) {
+ buffer->last_retired_head = buffer->tail;
+ intel_ring_update_space(buffer);
+ }
}
void i915_gem_reset(struct drm_device *dev)
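The pass added above works because free space in a ring is measured as the distance from the tail back round to the head, modulo the ring size; once last_retired_head is pulled up to the tail, the next space update sees a fully drained ring. A minimal, self-contained model of that arithmetic (illustrative only, not the driver's code; the names are made up):

#include <assert.h>

/* Toy power-of-two ring: free space is the gap the producer may still
 * fill before it would catch up with the consumer. */
struct toy_ring {
	unsigned int head;	/* consumer offset (GPU read position)  */
	unsigned int tail;	/* producer offset (CPU write position) */
	unsigned int size;	/* total size in bytes, power of two    */
};

static unsigned int toy_ring_space(const struct toy_ring *r)
{
	return (r->head - r->tail - 1) & (r->size - 1);
}

int main(void)
{
	struct toy_ring r = { .head = 256, .tail = 1024, .size = 4096 };

	assert(toy_ring_space(&r) == 3327);	/* mid-flight: partial space */

	/* After reset: the retired head catches up with the tail, so the
	 * whole ring (minus the one-slot guard) becomes available again. */
	r.head = r.tail;
	assert(toy_ring_space(&r) == r.size - 1);
	return 0;
}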
@@ -3886,7 +3905,7 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
* cacheline, whereas normally such cachelines would get
* invalidated.
*/
- if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)
+ if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
return -ENODEV;
level = I915_CACHE_LLC;
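The revision test above is rewritten in terms of an inclusive-range helper. A sketch of how IS_BXT_REVID() is believed to be built in i915_drv.h (treat the exact form as an assumption):

#define IS_REVID(p, since, until) \
	(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))

#define IS_BXT_REVID(p, since, until) \
	(IS_BROXTON(p) && IS_REVID(p, since, until))

With that, IS_BXT_REVID(dev, 0, BXT_REVID_A1) targets the Broxton A-step revisions that the old INTEL_REVID(dev) < BXT_REVID_B0 comparison was aimed at, and it folds the IS_BROXTON() platform check into the same macro.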
@@ -3929,17 +3948,11 @@ rpm_put:
int
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
- struct intel_engine_cs *pipelined,
- struct drm_i915_gem_request **pipelined_request,
const struct i915_ggtt_view *view)
{
u32 old_read_domains, old_write_domain;
int ret;
- ret = i915_gem_object_sync(obj, pipelined, pipelined_request);
- if (ret)
- return ret;
-
/* Mark the pin_display early so that we account for the
* display coherency whilst setting up the cache domains.
*/
@@ -4541,10 +4554,8 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
{
struct i915_vma *vma;
list_for_each_entry(vma, &obj->vma_list, vma_link) {
- if (i915_is_ggtt(vma->vm) &&
- vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
- continue;
- if (vma->vm == vm)
+ if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
+ vma->vm == vm)
return vma;
}
return NULL;
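The simplified lookup drops the explicit "skip non-normal GGTT views" branch: requiring ggtt_view.type == I915_GGTT_VIEW_NORMAL is sufficient because the normal view is the zero value of the view-type enum, so PPGTT vmas (whose ggtt_view stays zero-initialised) still match. Roughly, assuming the enum layout in i915_gem_gtt.h:

enum i915_ggtt_view_type {
	I915_GGTT_VIEW_NORMAL = 0,	/* default view; zero, so a
					 * zero-initialised ggtt_view
					 * compares as NORMAL too */
	/* alternate GGTT-only views (rotated, partial, ...) follow */
};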
@@ -4633,7 +4644,6 @@ int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
struct intel_engine_cs *ring = req->ring;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
int i, ret;
@@ -4649,10 +4659,10 @@ int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
* here because no other code should access these registers other than
* at initialization time.
*/
- for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
+ for (i = 0; i < GEN7_L3LOG_SIZE / 4; i++) {
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit(ring, reg_base + i);
- intel_ring_emit(ring, remap_info[i/4]);
+ intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i));
+ intel_ring_emit(ring, remap_info[i]);
}
intel_ring_advance(ring);
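The remap loop now walks remap_info[] one dword at a time and emits a typed register through intel_ring_emit_reg(). Assuming GEN7_L3LOG() mirrors the byte-offset arithmetic it replaces (the exact definition is an assumption):

/* Presumed shape of the register macro; i indexes dwords, so this lands
 * on the same offsets the old reg_base + i loop produced. */
#define GEN7_L3LOG(slice, i)	_MMIO(GEN7_L3LOG_BASE + (slice) * 0x200 + (i) * 4)

Each iteration then programs the slice's i-th L3 log register with remap_info[i], i.e. the same register/value pairs as before, just without the open-coded reg_base.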
@@ -4820,18 +4830,9 @@ i915_gem_init_hw(struct drm_device *dev)
if (HAS_GUC_UCODE(dev)) {
ret = intel_guc_ucode_load(dev);
if (ret) {
- /*
- * If we got an error and GuC submission is enabled, map
- * the error to -EIO so the GPU will be declared wedged.
- * OTOH, if we didn't intend to use the GuC anyway, just
- * discard the error and carry on.
- */
- DRM_ERROR("Failed to initialize GuC, error %d%s\n", ret,
- i915.enable_guc_submission ? "" :
- " (ignored)");
- ret = i915.enable_guc_submission ? -EIO : 0;
- if (ret)
- goto out;
+ DRM_ERROR("Failed to initialize GuC, error %d\n", ret);
+ ret = -EIO;
+ goto out;
}
}