author     Rodrigo Vivi <rodrigo.vivi@intel.com>    2015-07-09 02:21:31 +0300
committer  Daniel Vetter <daniel.vetter@ffwll.ch>   2015-07-09 17:35:35 +0300
commit     169de1316c1e69ad169d81c60549479640461630
tree       cca3622cb8877467534efdc4b47d14de2614b0aa /drivers/gpu/drm/i915/intel_psr.c
parent     de152b627eb3018de91ec5c5a50b38e17d80a88b
drm/i915: PSR: Flush means invalidate + flush
Since a flush actually means invalidate + flush, we need to force a PSR exit
on PSR flush. On core platforms there is no way to disable the hardware
tracking and fall back to pure software tracking, so we simulate it by fully
disabling PSR and scheduling a re-enable.

It is therefore a good idea to minimize these sequential disable/enable cycles
in the cases where we know hardware tracking works, such as when the flush was
originated by a flip: the flip has already invalidated. The origin argument is
used to skip the redundant exit in exactly that case.

With this patch in place it is possible to do a flush on dirty areas properly
in a following patch.

v2: Remove duplicated exit on HSW+Sprites as pointed out by Paulo.

Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Reviewed-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
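To make the new control flow concrete, here is a minimal standalone sketch of
the decision the patched intel_psr_flush() takes. This is not i915 code:
psr_exit_needed() is a made-up helper, and the fb_op_origin enumerators are
assumed to match the driver's definitions of that era.

/*
 * Illustrative model of the flush decision this patch introduces:
 * on DDI (core) platforms a flush forces a PSR exit unless it was
 * originated by a flip (which has already invalidated); on platforms
 * without hardware tracking (VLV/CHV) every dirty flush forces it.
 */
#include <stdbool.h>
#include <stdio.h>

enum fb_op_origin { ORIGIN_GTT, ORIGIN_CPU, ORIGIN_CS, ORIGIN_FLIP };

static bool psr_exit_needed(bool has_ddi, unsigned int frontbuffer_bits,
			    enum fb_op_origin origin)
{
	if (!frontbuffer_bits)
		return false;                 /* nothing dirty: nothing to do */
	if (has_ddi)
		return origin != ORIGIN_FLIP; /* flip already invalidated */
	return true;                          /* no HW tracking: always exit */
}

int main(void)
{
	printf("DDI + flip flush -> exit=%d\n",
	       psr_exit_needed(true, 0x1, ORIGIN_FLIP));  /* 0: skipped */
	printf("DDI + CPU flush  -> exit=%d\n",
	       psr_exit_needed(true, 0x1, ORIGIN_CPU));   /* 1: forced */
	printf("VLV + flip flush -> exit=%d\n",
	       psr_exit_needed(false, 0x1, ORIGIN_FLIP)); /* 1: forced */
	return 0;
}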
Diffstat (limited to 'drivers/gpu/drm/i915/intel_psr.c')
-rw-r--r-- | drivers/gpu/drm/i915/intel_psr.c | 40
1 file changed, 21 insertions(+), 19 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index d79ba58637d7..6db043f3c1ad 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -680,6 +680,7 @@ void intel_psr_invalidate(struct drm_device *dev,
* intel_psr_flush - Flush PSR
* @dev: DRM device
* @frontbuffer_bits: frontbuffer plane tracking bits
+ * @origin: which operation caused the flush
*
* Since the hardware frontbuffer tracking has gaps we need to integrate
* with the software frontbuffer tracking. This function gets called every
@@ -689,7 +690,7 @@ void intel_psr_invalidate(struct drm_device *dev,
* Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
*/
void intel_psr_flush(struct drm_device *dev,
- unsigned frontbuffer_bits)
+ unsigned frontbuffer_bits, enum fb_op_origin origin)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
@@ -707,24 +708,25 @@ void intel_psr_flush(struct drm_device *dev,
frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
- /*
- * On Haswell sprite plane updates don't result in a psr invalidating
- * signal in the hardware. Which means we need to manually fake this in
- * software for all flushes, not just when we've seen a preceding
- * invalidation through frontbuffer rendering.
- */
- if (IS_HASWELL(dev) &&
- (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
- intel_psr_exit(dev);
-
- /*
- * On Valleyview and Cherryview we don't use hardware tracking so
- * any plane updates or cursor moves don't result in a PSR
- * invalidating. Which means we need to manually fake this in
- * software for all flushes, not just when we've seen a preceding
- * invalidation through frontbuffer rendering. */
- if (frontbuffer_bits && !HAS_DDI(dev))
- intel_psr_exit(dev);
+ if (HAS_DDI(dev)) {
+ /*
+ * By definition every flush should mean invalidate + flush,
+ * however on core platforms let's minimize the
+ * disable/re-enable so we can avoid the invalidate when flip
+ * originated the flush.
+ */
+ if (frontbuffer_bits && origin != ORIGIN_FLIP)
+ intel_psr_exit(dev);
+ } else {
+ /*
+ * On Valleyview and Cherryview we don't use hardware tracking
+ * so any plane updates or cursor moves don't result in a PSR
+ * invalidating. Which means we need to manually fake this in
+ * software for all flushes.
+ */
+ if (frontbuffer_bits)
+ intel_psr_exit(dev);
+ }
if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
schedule_delayed_work(&dev_priv->psr.work,
		      msecs_to_jiffies(100));
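The kerneldoc above describes how software tracking plugs the gaps in the
hardware frontbuffer tracking; the tail of the function shows the other half
of the scheme: clearing busy_frontbuffer_bits and arming a delayed re-enable
once everything is idle. Below is a small standalone model of that
bookkeeping. The names (psr_state, psr_flush, reenable_scheduled) are made up
for illustration, and the deferred re-enable stands in for the ~100 ms delayed
work visible above.

#include <stdbool.h>
#include <stdio.h>

/* Simplified PSR bookkeeping; the real driver keeps this in dev_priv->psr. */
struct psr_state {
	bool active;
	unsigned int busy_frontbuffer_bits;
	bool reenable_scheduled;
};

static void psr_flush(struct psr_state *psr, unsigned int bits,
		      bool exit_needed)
{
	/* The flushed planes are no longer dirty. */
	psr->busy_frontbuffer_bits &= ~bits;

	if (exit_needed)
		psr->active = false;  /* models intel_psr_exit() */

	/* Arm the deferred re-enable only once PSR is off and nothing
	 * is still busy, mirroring the schedule_delayed_work() call. */
	if (!psr->active && !psr->busy_frontbuffer_bits)
		psr->reenable_scheduled = true;
}

int main(void)
{
	struct psr_state psr = { .active = true,
				 .busy_frontbuffer_bits = 0x3 };

	psr_flush(&psr, 0x1, true);   /* plane 0 clean, PSR exits */
	printf("active=%d scheduled=%d\n", psr.active, psr.reenable_scheduled);
	/* active=0 scheduled=0: plane 1 is still busy */

	psr_flush(&psr, 0x2, false);  /* last busy plane flushed */
	printf("active=%d scheduled=%d\n", psr.active, psr.reenable_scheduled);
	/* active=0 scheduled=1: delayed re-enable armed */
	return 0;
}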