summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h1
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c37
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c21
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c71
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h25
5 files changed, 153 insertions, 2 deletions
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index c3b9fcf301a0..6446911077d0 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2200,6 +2200,7 @@ struct drm_i915_gem_request {
int i915_gem_request_alloc(struct intel_engine_cs *ring,
struct intel_context *ctx);
+void i915_gem_request_cancel(struct drm_i915_gem_request *req);
void i915_gem_request_free(struct kref *req_ref);
static inline uint32_t
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 80b509bed6e0..b9e0989063b4 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2485,6 +2485,13 @@ int __i915_add_request(struct intel_engine_cs *ring,
} else
ringbuf = ring->buffer;
+ /*
+ * To ensure that this call will not fail, space for its emissions
+ * should already have been reserved in the ring buffer. Let the ring
+ * know that it is time to use that space up.
+ */
+ intel_ring_reserved_space_use(ringbuf);
+
request_start = intel_ring_get_tail(ringbuf);
/*
* Emit any outstanding flushes - execbuf can fail to emit the flush
@@ -2567,6 +2574,9 @@ int __i915_add_request(struct intel_engine_cs *ring,
round_jiffies_up_relative(HZ));
intel_mark_busy(dev_priv->dev);
+ /* Sanity check that the reserved size was large enough. */
+ intel_ring_reserved_space_end(ringbuf);
+
return 0;
}
@@ -2665,6 +2675,26 @@ int i915_gem_request_alloc(struct intel_engine_cs *ring,
if (ret)
goto err;
+ /*
+ * Reserve space in the ring buffer for all the commands required to
+ * eventually emit this request. This is to guarantee that the
+ * i915_add_request() call can't fail. Note that the reserve may need
+ * to be redone if the request is not actually submitted straight
+ * away, e.g. because a GPU scheduler has deferred it.
+ *
+ * Note further that this call merely notes the reserve request. A
+ * subsequent call to *_ring_begin() is required to actually ensure
+ * that the reservation is available. Without the begin, if the
+ * request creator immediately submitted the request without adding
+ * any commands to it then there might not actually be sufficient
+ * room for the submission commands. Unfortunately, the current
+ * *_ring_begin() implementations potentially call back here to
+ * i915_gem_request_alloc(). Thus calling _begin() here would lead to
+ * infinite recursion! Until that back call path is removed, it is
+ * necessary to do a manual _begin() outside.
+ */
+ intel_ring_reserved_space_reserve(req->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
+
ring->outstanding_lazy_request = req;
return 0;
@@ -2673,6 +2703,13 @@ err:
return ret;
}
+void i915_gem_request_cancel(struct drm_i915_gem_request *req)
+{
+ intel_ring_reserved_space_cancel(req->ringbuf);
+
+ i915_gem_request_unreference(req);
+}
+
struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *ring)
{
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 2b65d29c4801..7451f38b2ef8 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -686,6 +686,9 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
unsigned space;
int ret;
+ /* The whole point of reserving space is to not wait! */
+ WARN_ON(ringbuf->reserved_in_use);
+
if (intel_ring_space(ringbuf) >= bytes)
return 0;
@@ -746,6 +749,9 @@ static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf,
uint32_t __iomem *virt;
int rem = ringbuf->size - ringbuf->tail;
+ /* Can't wrap if space has already been reserved! */
+ WARN_ON(ringbuf->reserved_in_use);
+
if (ringbuf->space < rem) {
int ret = logical_ring_wait_for_space(ringbuf, ctx, rem);
@@ -769,10 +775,25 @@ static int logical_ring_prepare(struct intel_ringbuffer *ringbuf,
{
int ret;
+ /*
+ * Add on the reserved size to the request to make sure that after
+ * the intended commands have been emitted, there is guaranteed to
+ * still be enough free space to send them to the hardware.
+ */
+ if (!ringbuf->reserved_in_use)
+ bytes += ringbuf->reserved_size;
+
if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
ret = logical_ring_wrap_buffer(ringbuf, ctx);
if (unlikely(ret))
return ret;
+
+ if(ringbuf->reserved_size) {
+ uint32_t size = ringbuf->reserved_size;
+
+ intel_ring_reserved_space_cancel(ringbuf);
+ intel_ring_reserved_space_reserve(ringbuf, size);
+ }
}
if (unlikely(ringbuf->space < bytes)) {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index b70d25bffb60..0c2bf0ed633d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2113,6 +2113,9 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
unsigned space;
int ret;
+ /* The whole point of reserving space is to not wait! */
+ WARN_ON(ringbuf->reserved_in_use);
+
if (intel_ring_space(ringbuf) >= n)
return 0;
@@ -2140,6 +2143,9 @@ static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
struct intel_ringbuffer *ringbuf = ring->buffer;
int rem = ringbuf->size - ringbuf->tail;
+ /* Can't wrap if space has already been reserved! */
+ WARN_ON(ringbuf->reserved_in_use);
+
if (ringbuf->space < rem) {
int ret = ring_wait_for_space(ring, rem);
if (ret)
@@ -2190,16 +2196,77 @@ int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
return 0;
}
-static int __intel_ring_prepare(struct intel_engine_cs *ring,
- int bytes)
+void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size)
+{
+ /* NB: Until request management is fully tidied up and the OLR is
+ * removed, there are too many ways to get false hits on this
+ * anti-recursion check! */
+ /*WARN_ON(ringbuf->reserved_size);*/
+ WARN_ON(ringbuf->reserved_in_use);
+
+ ringbuf->reserved_size = size;
+
+ /*
+ * Really need to call _begin() here but that currently leads to
+ * recursion problems! This will be fixed later but for now just
+ * return and hope for the best. Note that there is only a real
+ * problem if the create of the request never actually calls _begin()
+ * but if they are not submitting any work then why did they create
+ * the request in the first place?
+ */
+}
+
+void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf)
+{
+ WARN_ON(ringbuf->reserved_in_use);
+
+ ringbuf->reserved_size = 0;
+ ringbuf->reserved_in_use = false;
+}
+
+void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf)
+{
+ WARN_ON(ringbuf->reserved_in_use);
+
+ ringbuf->reserved_in_use = true;
+ ringbuf->reserved_tail = ringbuf->tail;
+}
+
+void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
+{
+ WARN_ON(!ringbuf->reserved_in_use);
+ WARN(ringbuf->tail > ringbuf->reserved_tail + ringbuf->reserved_size,
+ "request reserved size too small: %d vs %d!\n",
+ ringbuf->tail - ringbuf->reserved_tail, ringbuf->reserved_size);
+
+ ringbuf->reserved_size = 0;
+ ringbuf->reserved_in_use = false;
+}
+
+static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
{
struct intel_ringbuffer *ringbuf = ring->buffer;
int ret;
+ /*
+ * Add on the reserved size to the request to make sure that after
+ * the intended commands have been emitted, there is guaranteed to
+ * still be enough free space to send them to the hardware.
+ */
+ if (!ringbuf->reserved_in_use)
+ bytes += ringbuf->reserved_size;
+
if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
ret = intel_wrap_ring_buffer(ring);
if (unlikely(ret))
return ret;
+
+ if(ringbuf->reserved_size) {
+ uint32_t size = ringbuf->reserved_size;
+
+ intel_ring_reserved_space_cancel(ringbuf);
+ intel_ring_reserved_space_reserve(ringbuf, size);
+ }
}
if (unlikely(ringbuf->space < bytes)) {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 64850293559c..73db3ae8f237 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -106,6 +106,9 @@ struct intel_ringbuffer {
int space;
int size;
int effective_size;
+ int reserved_size;
+ int reserved_tail;
+ bool reserved_in_use;
/** We track the position of the requests in the ring buffer, and
* when each is retired we increment last_retired_head as the GPU
@@ -472,4 +475,26 @@ intel_ring_get_request(struct intel_engine_cs *ring)
return ring->outstanding_lazy_request;
}
+/*
+ * Arbitrary size for largest possible 'add request' sequence. The code paths
+ * are complex and variable. Empirical measurement shows that the worst case
+ * is ILK at 136 words. Reserving too much is better than reserving too little
+ * as that allows for corner cases that might have been missed. So the figure
+ * has been rounded up to 160 words.
+ */
+#define MIN_SPACE_FOR_ADD_REQUEST 160
+
+/*
+ * Reserve space in the ring to guarantee that the i915_add_request() call
+ * will always have sufficient room to do its stuff. The request creation
+ * code calls this automatically.
+ */
+void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size);
+/* Cancel the reservation, e.g. because the request is being discarded. */
+void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf);
+/* Use the reserved space - for use by i915_add_request() only. */
+void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf);
+/* Finish with the reserved space - for use by i915_add_request() only. */
+void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf);
+
#endif /* _INTEL_RINGBUFFER_H_ */